Lines matching refs: ptdev
145 /** @ptdev: Device. */
146 struct panthor_device *ptdev;
534 /** @ptdev: Device. */
535 struct panthor_device *ptdev;
734 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
748 !panthor_device_reset_is_pending((sched)->ptdev)) \
764 !panthor_device_reset_is_pending((sched)->ptdev)) \
868 struct panthor_device *ptdev = group->ptdev;
882 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
887 if (drm_WARN_ON(&ptdev->base, ret))
891 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
966 struct panthor_device *ptdev = group->ptdev;
968 drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
969 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
970 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
1000 struct panthor_device *ptdev = group->ptdev;
1004 lockdep_assert_held(&ptdev->scheduler->lock);
1006 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
1007 ptdev->scheduler->csg_slots[csg_id].group))
1014 csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1042 struct panthor_device *ptdev = group->ptdev;
1045 lockdep_assert_held(&ptdev->scheduler->lock);
1047 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1050 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1053 slot = &ptdev->scheduler->csg_slots[group->csg_id];
1176 * @ptdev: Device.
1186 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1188 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1189 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1191 lockdep_assert_held(&ptdev->scheduler->lock);
1194 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1216 * @ptdev: Device.
1226 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1228 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1229 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1232 lockdep_assert_held(&ptdev->scheduler->lock);
1245 * @ptdev: Device.
1253 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1255 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1259 lockdep_assert_held(&ptdev->scheduler->lock);
1261 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1262 endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
1268 * @ptdev: Device.
1275 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1277 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1280 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1294 &group->ptdev->scheduler->groups.waiting);
1326 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1328 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1332 lockdep_assert_held(&ptdev->scheduler->lock);
1339 cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1344 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1346 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1352 lockdep_assert_held(&ptdev->scheduler->lock);
1354 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1380 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1393 panthor_device_schedule_reset(ptdev);
1396 csg_slot_sync_queues_state_locked(ptdev, csg_id);
1409 cs_slot_reset_locked(ptdev, csg_id, i);
1417 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1425 lockdep_assert_held(&ptdev->scheduler->lock);
1430 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1433 csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1438 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1442 cs_slot_prog_locked(ptdev, csg_id, i);
1454 panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);
1476 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1479 struct panthor_scheduler *sched = ptdev->scheduler;
1488 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1493 drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
1503 panthor_device_schedule_reset(ptdev);
1509 drm_warn(&ptdev->base,
1516 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1522 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1525 struct panthor_scheduler *sched = ptdev->scheduler;
1536 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1558 drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
1562 drm_warn(&ptdev->base,
1569 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1576 struct panthor_device *ptdev = group->ptdev;
1577 struct panthor_scheduler *sched = ptdev->scheduler;
1589 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1623 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1635 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1636 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1642 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1677 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1680 struct panthor_scheduler *sched = ptdev->scheduler;
1686 if (drm_WARN_ON(&ptdev->base, !group))
1699 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1705 lockdep_assert_held(&ptdev->scheduler->lock);
1707 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1713 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1716 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1719 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1729 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1731 struct panthor_scheduler *sched = ptdev->scheduler;
1744 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1747 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1750 lockdep_assert_held(&ptdev->scheduler->lock);
1755 sched_queue_work(ptdev->scheduler, sync_upd);
1759 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1761 struct panthor_scheduler *sched = ptdev->scheduler;
1768 if (!drm_WARN_ON(&ptdev->base, !group)) {
1769 drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
1775 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1780 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1786 lockdep_assert_held(&ptdev->scheduler->lock);
1788 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1791 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1816 csg_slot_process_idle_event_locked(ptdev, csg_id);
1819 csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1825 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1832 csg_slot_sync_update_locked(ptdev, csg_id);
1837 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1840 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1842 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1844 lockdep_assert_held(&ptdev->scheduler->lock);
1848 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1853 * @ptdev: Device.
1855 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1857 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1860 lockdep_assert_held(&ptdev->scheduler->lock);
1867 sched_process_idle_event_locked(ptdev);
1875 struct panthor_device *ptdev = sched->ptdev;
1880 sched_process_global_irq_locked(ptdev);
1887 sched_process_csg_irq_locked(ptdev, csg_id);
1897 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1899 if (!ptdev->scheduler)
1902 atomic_or(events, &ptdev->scheduler->fw_events);
1903 sched_queue_work(ptdev->scheduler, fw_events);
1935 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1939 if (drm_WARN_ON(&ptdev->base, !mask) ||
1940 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1948 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1951 struct panthor_scheduler *sched = ptdev->scheduler;
1964 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1970 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1980 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1982 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1985 csg_slot_sync_priority_locked(ptdev, csg_id);
1988 csg_slot_sync_state_locked(ptdev, csg_id);
1991 csg_slot_sync_queues_state_locked(ptdev, csg_id);
1995 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id);
2107 struct panthor_device *ptdev = sched->ptdev;
2129 csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2136 sched_process_csg_irq_locked(ptdev, i);
2144 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2149 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2151 panthor_device_schedule_reset(ptdev);
2164 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2195 sched_queue_work(group->ptdev->scheduler, sync_upd);
2218 struct panthor_device *ptdev = sched->ptdev;
2227 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2250 drm_WARN_ON(&ptdev->base,
2271 struct panthor_device *ptdev = sched->ptdev;
2286 if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2290 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2306 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2312 panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
2315 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2322 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2324 panthor_device_schedule_reset(ptdev);
2337 sched_process_csg_irq_locked(ptdev, group->csg_id);
2363 if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2366 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2369 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2370 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2374 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2381 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2383 panthor_device_schedule_reset(ptdev);
2449 struct panthor_device *ptdev = sched->ptdev;
2457 if (!drm_dev_enter(&ptdev->base, &cookie))
2460 ret = panthor_device_resume_and_get(ptdev);
2461 if (drm_WARN_ON(&ptdev->base, ret))
2474 if (panthor_device_reset_is_pending(sched->ptdev))
2543 panthor_devfreq_record_idle(sched->ptdev);
2545 pm_runtime_put_autosuspend(ptdev->base.dev);
2549 panthor_devfreq_record_busy(sched->ptdev);
2551 pm_runtime_get(ptdev->base.dev);
2569 pm_runtime_mark_last_busy(ptdev->base.dev);
2570 pm_runtime_put_autosuspend(ptdev->base.dev);
2623 drm_WARN_ON(&group->ptdev->base, ret < 0);
2650 static void sched_resume_tick(struct panthor_device *ptdev)
2652 struct panthor_scheduler *sched = ptdev->scheduler;
2655 drm_WARN_ON(&ptdev->base, sched->resched_target != U64_MAX);
2673 struct panthor_device *ptdev = group->ptdev;
2674 struct panthor_scheduler *sched = ptdev->scheduler;
2721 sched_resume_tick(ptdev);
2745 struct panthor_scheduler *sched = group->ptdev->scheduler;
2758 struct panthor_scheduler *sched = group->ptdev->scheduler;
2760 lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2782 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2785 if (ptdev->scheduler)
2786 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2789 void panthor_sched_prepare_for_vm_destruction(struct panthor_device *ptdev)
2800 flush_work(&ptdev->scheduler->tick_work.work);
2803 void panthor_sched_resume(struct panthor_device *ptdev)
2806 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
2809 void panthor_sched_suspend(struct panthor_device *ptdev)
2811 struct panthor_scheduler *sched = ptdev->scheduler;
2822 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2831 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2837 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2854 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2860 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2880 cs_slot_reset_locked(ptdev, csg_id, i);
2894 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2904 csg_slot_sync_update_locked(ptdev, csg_id);
2920 sched_process_csg_irq_locked(ptdev, group->csg_id);
2924 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2941 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2943 struct panthor_scheduler *sched = ptdev->scheduler;
2956 panthor_sched_suspend(ptdev);
2974 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2976 struct panthor_scheduler *sched = ptdev->scheduler;
3171 struct panthor_device *ptdev = group->ptdev;
3172 struct panthor_scheduler *sched = ptdev->scheduler;
3174 params->addr_reg = ptdev->csif_info.cs_reg_count -
3175 ptdev->csif_info.unpreserved_cs_reg_count;
3300 struct panthor_device *ptdev = group->ptdev;
3301 struct panthor_scheduler *sched = ptdev->scheduler;
3316 ret = panthor_device_resume_and_get(ptdev);
3317 if (drm_WARN_ON(&ptdev->base, ret))
3370 sched_resume_tick(ptdev);
3372 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3375 pm_runtime_get(ptdev->base.dev);
3379 panthor_devfreq_record_busy(sched->ptdev);
3390 pm_runtime_mark_last_busy(ptdev->base.dev);
3391 pm_runtime_put_autosuspend(ptdev->base.dev);
3401 struct panthor_device *ptdev = group->ptdev;
3402 struct panthor_scheduler *sched = ptdev->scheduler;
3405 drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
3408 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3415 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3443 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3488 .submit_wq = group->ptdev->scheduler->wq,
3498 .timeout_wq = group->ptdev->reset.wq,
3499 .dev = group->ptdev->base.dev,
3527 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3543 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3554 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3557 panthor_kernel_bo_create(group->ptdev, group->vm,
3608 static void add_group_kbo_sizes(struct panthor_device *ptdev,
3614 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
3616 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
3638 struct panthor_device *ptdev = pfile->ptdev;
3640 struct panthor_scheduler *sched = ptdev->scheduler;
3641 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3652 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3653 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3654 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3671 group->ptdev = ptdev;
3694 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3702 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3709 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3757 add_group_kbo_sizes(group->ptdev, group);
3777 struct panthor_device *ptdev = pfile->ptdev;
3778 struct panthor_scheduler *sched = ptdev->scheduler;
3824 struct panthor_device *ptdev = pfile->ptdev;
3825 struct panthor_scheduler *sched = ptdev->scheduler;
3913 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
4021 job->profiling.mask = pfile->ptdev->profile_mask;
4049 void panthor_sched_unplug(struct panthor_device *ptdev)
4051 struct panthor_scheduler *sched = ptdev->scheduler;
4059 pm_runtime_put(ptdev->base.dev);
4087 int panthor_sched_init(struct panthor_device *ptdev)
4089 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
4090 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
4091 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
4096 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
4118 gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
4120 drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
4125 sched->ptdev = ptdev;
4130 ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
4131 ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
4132 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
4141 ret = drmm_mutex_init(&ptdev->base, &sched->lock);
4151 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
4175 panthor_sched_fini(&ptdev->base, sched);
4176 drm_err(&ptdev->base, "Failed to allocate the workqueues");
4180 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
4184 ptdev->scheduler = sched;
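
A note on the pattern visible in these matches (a minimal sketch, not part of the listing): most helpers cache group->ptdev in a local, assert the scheduler lock with lockdep_assert_held(&ptdev->scheduler->lock), and report problems against the DRM device via drm_WARN_ON(&ptdev->base, ...) or drm_warn(&ptdev->base, ...). The helper below is hypothetical and only illustrates that shape; it assumes the panthor kernel headers and is not buildable on its own.

/* Hypothetical helper illustrating the recurring ptdev access pattern. */
static void example_slot_helper_locked(struct panthor_group *group, u32 csg_id)
{
	struct panthor_device *ptdev = group->ptdev;		/* device backing this group */
	struct panthor_scheduler *sched = ptdev->scheduler;	/* per-device scheduler state */

	/* Every *_locked() helper in the listing asserts the scheduler lock. */
	lockdep_assert_held(&sched->lock);

	/* Sanity checks go through the DRM device embedded in ptdev. */
	if (drm_WARN_ON(&ptdev->base, csg_id >= sched->csg_slot_count))
		return;

	/* CSG slot state is indexed under ptdev->scheduler->csg_slots[]. */
	sched->csg_slots[csg_id].group = group;
}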