15a993507STvrtko Ursulin // SPDX-License-Identifier: GPL-2.0
25a993507STvrtko Ursulin /* Copyright (c) 2025 Valve Corporation */
35a993507STvrtko Ursulin
45a993507STvrtko Ursulin #include "sched_tests.h"
55a993507STvrtko Ursulin
65a993507STvrtko Ursulin /*
75a993507STvrtko Ursulin * Here we implement the mock "GPU" (or the scheduler backend) which is used by
85a993507STvrtko Ursulin * the DRM scheduler unit tests in order to exercise the core functionality.
95a993507STvrtko Ursulin *
105a993507STvrtko Ursulin * Test cases are implemented in a separate file.
115a993507STvrtko Ursulin */
125a993507STvrtko Ursulin
135a993507STvrtko Ursulin /**
145a993507STvrtko Ursulin * drm_mock_sched_entity_new - Create a new mock scheduler entity
155a993507STvrtko Ursulin *
165a993507STvrtko Ursulin * @test: KUnit test owning the entity
175a993507STvrtko Ursulin * @priority: Scheduling priority
185a993507STvrtko Ursulin * @sched: Mock scheduler on which the entity can be scheduled
195a993507STvrtko Ursulin *
205a993507STvrtko Ursulin * Returns: New mock scheduler entity with allocation managed by the test
215a993507STvrtko Ursulin */
225a993507STvrtko Ursulin struct drm_mock_sched_entity *
drm_mock_sched_entity_new(struct kunit * test,enum drm_sched_priority priority,struct drm_mock_scheduler * sched)235a993507STvrtko Ursulin drm_mock_sched_entity_new(struct kunit *test,
245a993507STvrtko Ursulin enum drm_sched_priority priority,
255a993507STvrtko Ursulin struct drm_mock_scheduler *sched)
265a993507STvrtko Ursulin {
275a993507STvrtko Ursulin struct drm_mock_sched_entity *entity;
285a993507STvrtko Ursulin struct drm_gpu_scheduler *drm_sched;
295a993507STvrtko Ursulin int ret;
305a993507STvrtko Ursulin
315a993507STvrtko Ursulin entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
325a993507STvrtko Ursulin KUNIT_ASSERT_NOT_NULL(test, entity);
335a993507STvrtko Ursulin
345a993507STvrtko Ursulin drm_sched = &sched->base;
355a993507STvrtko Ursulin ret = drm_sched_entity_init(&entity->base,
365a993507STvrtko Ursulin priority,
375a993507STvrtko Ursulin &drm_sched, 1,
385a993507STvrtko Ursulin NULL);
395a993507STvrtko Ursulin KUNIT_ASSERT_EQ(test, ret, 0);
405a993507STvrtko Ursulin
415a993507STvrtko Ursulin entity->test = test;
425a993507STvrtko Ursulin
435a993507STvrtko Ursulin return entity;
445a993507STvrtko Ursulin }
455a993507STvrtko Ursulin
/**
 * drm_mock_sched_entity_free - Destroys a mock scheduler entity
 *
 * @entity: Entity to destroy
 *
 * To be used from the test cases once done with the entity.
 */
void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
{
	/* Flushes outstanding jobs and tears down the base entity. */
	drm_sched_entity_destroy(&entity->base);
}
575a993507STvrtko Ursulin
/* Complete a job on the mock "hardware". Caller must hold sched->lock. */
static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
{
	struct drm_mock_scheduler *sched =
		drm_sched_to_mock_sched(job->base.sched);

	lockdep_assert_held(&sched->lock);

	job->flags |= DRM_MOCK_SCHED_JOB_DONE;
	/* Stop tracking the job on the scheduler's pending job_list. */
	list_del(&job->link);
	/* sched->lock is also the hw_fence lock, hence the _locked variant. */
	dma_fence_signal_locked(&job->hw_fence);
	/* Wake up anything waiting on job->done elsewhere in the tests. */
	complete(&job->done);
}
705a993507STvrtko Ursulin
715a993507STvrtko Ursulin static enum hrtimer_restart
drm_mock_sched_job_signal_timer(struct hrtimer * hrtimer)725a993507STvrtko Ursulin drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
735a993507STvrtko Ursulin {
745a993507STvrtko Ursulin struct drm_mock_sched_job *job =
755a993507STvrtko Ursulin container_of(hrtimer, typeof(*job), timer);
765a993507STvrtko Ursulin struct drm_mock_scheduler *sched =
775a993507STvrtko Ursulin drm_sched_to_mock_sched(job->base.sched);
785a993507STvrtko Ursulin struct drm_mock_sched_job *next;
795a993507STvrtko Ursulin ktime_t now = ktime_get();
805a993507STvrtko Ursulin unsigned long flags;
815a993507STvrtko Ursulin LIST_HEAD(signal);
825a993507STvrtko Ursulin
835a993507STvrtko Ursulin spin_lock_irqsave(&sched->lock, flags);
845a993507STvrtko Ursulin list_for_each_entry_safe(job, next, &sched->job_list, link) {
855a993507STvrtko Ursulin if (!job->duration_us)
865a993507STvrtko Ursulin break;
875a993507STvrtko Ursulin
885a993507STvrtko Ursulin if (ktime_before(now, job->finish_at))
895a993507STvrtko Ursulin break;
905a993507STvrtko Ursulin
915a993507STvrtko Ursulin sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
925a993507STvrtko Ursulin drm_mock_sched_job_complete(job);
935a993507STvrtko Ursulin }
945a993507STvrtko Ursulin spin_unlock_irqrestore(&sched->lock, flags);
955a993507STvrtko Ursulin
965a993507STvrtko Ursulin return HRTIMER_NORESTART;
975a993507STvrtko Ursulin }
985a993507STvrtko Ursulin
995a993507STvrtko Ursulin /**
1005a993507STvrtko Ursulin * drm_mock_sched_job_new - Create a new mock scheduler job
1015a993507STvrtko Ursulin *
1025a993507STvrtko Ursulin * @test: KUnit test owning the job
1035a993507STvrtko Ursulin * @entity: Scheduler entity of the job
1045a993507STvrtko Ursulin *
1055a993507STvrtko Ursulin * Returns: New mock scheduler job with allocation managed by the test
1065a993507STvrtko Ursulin */
1075a993507STvrtko Ursulin struct drm_mock_sched_job *
drm_mock_sched_job_new(struct kunit * test,struct drm_mock_sched_entity * entity)1085a993507STvrtko Ursulin drm_mock_sched_job_new(struct kunit *test,
1095a993507STvrtko Ursulin struct drm_mock_sched_entity *entity)
1105a993507STvrtko Ursulin {
1115a993507STvrtko Ursulin struct drm_mock_sched_job *job;
1125a993507STvrtko Ursulin int ret;
1135a993507STvrtko Ursulin
1145a993507STvrtko Ursulin job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
1155a993507STvrtko Ursulin KUNIT_ASSERT_NOT_NULL(test, job);
1165a993507STvrtko Ursulin
1175a993507STvrtko Ursulin ret = drm_sched_job_init(&job->base,
1185a993507STvrtko Ursulin &entity->base,
1195a993507STvrtko Ursulin 1,
12029565548SPierre-Eric Pelloux-Prayer NULL,
12129565548SPierre-Eric Pelloux-Prayer 1);
1225a993507STvrtko Ursulin KUNIT_ASSERT_EQ(test, ret, 0);
1235a993507STvrtko Ursulin
1245a993507STvrtko Ursulin job->test = test;
1255a993507STvrtko Ursulin
1265a993507STvrtko Ursulin init_completion(&job->done);
1275a993507STvrtko Ursulin INIT_LIST_HEAD(&job->link);
1281afba39fSThomas Zimmermann hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
1291afba39fSThomas Zimmermann CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1305a993507STvrtko Ursulin
1315a993507STvrtko Ursulin return job;
1325a993507STvrtko Ursulin }
1335a993507STvrtko Ursulin
/* dma_fence_ops.get_driver_name callback for the mock hardware fences. */
static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
{
	return "drm_mock_sched";
}
1385a993507STvrtko Ursulin
1395a993507STvrtko Ursulin static const char *
drm_mock_sched_hw_fence_timeline_name(struct dma_fence * fence)1405a993507STvrtko Ursulin drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
1415a993507STvrtko Ursulin {
1425a993507STvrtko Ursulin struct drm_mock_sched_job *job =
1435a993507STvrtko Ursulin container_of(fence, typeof(*job), hw_fence);
1445a993507STvrtko Ursulin
1455a993507STvrtko Ursulin return (const char *)job->base.sched->name;
1465a993507STvrtko Ursulin }
1475a993507STvrtko Ursulin
/* dma_fence_ops.release callback: the last fence reference is gone. */
static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
{
	struct drm_mock_sched_job *job =
		container_of(fence, typeof(*job), hw_fence);

	/* Make sure the signalling timer cannot fire on a freed job. */
	hrtimer_cancel(&job->timer);

	/* Containing job is freed by the kunit framework */
}
1575a993507STvrtko Ursulin
/* Minimal fence ops for the mock "hardware" fences. */
static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
	.get_driver_name = drm_mock_sched_hw_fence_driver_name,
	.get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
	.release = drm_mock_sched_hw_fence_release,
};
1635a993507STvrtko Ursulin
/* drm_sched_backend_ops.run_job: "submit" a job to the mock hardware. */
static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
{
	struct drm_mock_scheduler *sched =
		drm_sched_to_mock_sched(sched_job->sched);
	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);

	/* sched->lock doubles as the fence lock so completion can signal
	 * with the scheduler lock held (see drm_mock_sched_job_complete). */
	dma_fence_init(&job->hw_fence,
		       &drm_mock_sched_hw_fence_ops,
		       &sched->lock,
		       sched->hw_timeline.context,
		       atomic_inc_return(&sched->hw_timeline.next_seqno));

	dma_fence_get(&job->hw_fence); /* Reference for the job_list */

	spin_lock_irq(&sched->lock);
	if (job->duration_us) {
		ktime_t prev_finish_at = 0;

		/*
		 * Timed jobs execute back to back: a new one finishes
		 * duration_us after the previously queued job finishes.
		 */
		if (!list_empty(&sched->job_list)) {
			struct drm_mock_sched_job *prev =
				list_last_entry(&sched->job_list, typeof(*prev),
						link);

			prev_finish_at = prev->finish_at;
		}

		/* No timed predecessor - start counting from now. */
		if (!prev_finish_at)
			prev_finish_at = ktime_get();

		job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
	}
	list_add_tail(&job->link, &sched->job_list);
	/* Untimed jobs (finish_at == 0) are completed manually by tests. */
	if (job->finish_at)
		hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
	spin_unlock_irq(&sched->lock);

	return &job->hw_fence;
}
2025a993507STvrtko Ursulin
/*
 * Normally, drivers would take appropriate measures in this callback, such as
 * killing the entity the faulty job is associated with, resetting the hardware
 * and / or resubmitting non-faulty jobs.
 *
 * For the mock scheduler, there are no hardware rings to be reset nor jobs
 * to be resubmitted. Thus, this function merely ensures that
 * a) timedout fences get signaled properly and removed from the pending list
 * b) the mock scheduler framework gets informed about the timeout via a flag
 * c) The drm_sched_job, no longer needed, gets freed
 */
static enum drm_gpu_sched_stat
mock_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
	unsigned long flags;

	/* Tests can opt a job out of the reset path for one timeout cycle. */
	if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
		job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
		return DRM_GPU_SCHED_STAT_NO_HANG;
	}

	spin_lock_irqsave(&sched->lock, flags);
	/* The job may have signaled concurrently - only then skip cleanup. */
	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
		list_del(&job->link);
		job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
		dma_fence_set_error(&job->hw_fence, -ETIMEDOUT);
		dma_fence_signal_locked(&job->hw_fence);
	}
	spin_unlock_irqrestore(&sched->lock, flags);

	/* Drop the job_list reference taken in mock_sched_run_job(). */
	dma_fence_put(&job->hw_fence);
	drm_sched_job_cleanup(sched_job);
	/* Mock job itself is freed by the kunit framework. */

	return DRM_GPU_SCHED_STAT_RESET;
}
2415a993507STvrtko Ursulin
/* drm_sched_backend_ops.free_job: release scheduler-side job resources. */
static void mock_sched_free_job(struct drm_sched_job *sched_job)
{
	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);

	/* Drop the job_list reference taken in mock_sched_run_job(). */
	dma_fence_put(&job->hw_fence);
	drm_sched_job_cleanup(sched_job);

	/* Mock job itself is freed by the kunit framework. */
}
2515a993507STvrtko Ursulin
/* drm_sched_backend_ops.cancel_job: cancel a still pending job. */
static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
{
	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
	unsigned long flags;

	/* Make sure the signalling timer cannot race with the cancel. */
	hrtimer_cancel(&job->timer);

	spin_lock_irqsave(&sched->lock, flags);
	/* If not already signaled, fail the fence with -ECANCELED. */
	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
		list_del(&job->link);
		dma_fence_set_error(&job->hw_fence, -ECANCELED);
		dma_fence_signal_locked(&job->hw_fence);
	}
	spin_unlock_irqrestore(&sched->lock, flags);

	/*
	 * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still.
	 * Mock job itself is freed by the kunit framework.
	 */
}
2734576de9bSPhilipp Stanner
/* Backend callbacks wiring the mock "hardware" into the DRM scheduler. */
static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
	.run_job = mock_sched_run_job,
	.timedout_job = mock_sched_timedout_job,
	.free_job = mock_sched_free_job,
	.cancel_job = mock_sched_cancel_job,
};
2805a993507STvrtko Ursulin
2815a993507STvrtko Ursulin /**
2825a993507STvrtko Ursulin * drm_mock_sched_new - Create a new mock scheduler
2835a993507STvrtko Ursulin *
2845a993507STvrtko Ursulin * @test: KUnit test owning the job
28553e65974STvrtko Ursulin * @timeout: Job timeout to set
2865a993507STvrtko Ursulin *
2875a993507STvrtko Ursulin * Returns: New mock scheduler with allocation managed by the test
2885a993507STvrtko Ursulin */
drm_mock_sched_new(struct kunit * test,long timeout)28953e65974STvrtko Ursulin struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
2905a993507STvrtko Ursulin {
2915a993507STvrtko Ursulin struct drm_sched_init_args args = {
2925a993507STvrtko Ursulin .ops = &drm_mock_scheduler_ops,
2935a993507STvrtko Ursulin .num_rqs = DRM_SCHED_PRIORITY_COUNT,
2945a993507STvrtko Ursulin .credit_limit = U32_MAX,
2955a993507STvrtko Ursulin .hang_limit = 1,
29653e65974STvrtko Ursulin .timeout = timeout,
2975a993507STvrtko Ursulin .name = "drm-mock-scheduler",
2985a993507STvrtko Ursulin };
2995a993507STvrtko Ursulin struct drm_mock_scheduler *sched;
3005a993507STvrtko Ursulin int ret;
3015a993507STvrtko Ursulin
3025a993507STvrtko Ursulin sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
3035a993507STvrtko Ursulin KUNIT_ASSERT_NOT_NULL(test, sched);
3045a993507STvrtko Ursulin
3055a993507STvrtko Ursulin ret = drm_sched_init(&sched->base, &args);
3065a993507STvrtko Ursulin KUNIT_ASSERT_EQ(test, ret, 0);
3075a993507STvrtko Ursulin
3085a993507STvrtko Ursulin sched->test = test;
3095a993507STvrtko Ursulin sched->hw_timeline.context = dma_fence_context_alloc(1);
3105a993507STvrtko Ursulin atomic_set(&sched->hw_timeline.next_seqno, 0);
3115a993507STvrtko Ursulin INIT_LIST_HEAD(&sched->job_list);
3125a993507STvrtko Ursulin spin_lock_init(&sched->lock);
3135a993507STvrtko Ursulin
3145a993507STvrtko Ursulin return sched;
3155a993507STvrtko Ursulin }
3165a993507STvrtko Ursulin
/**
 * drm_mock_sched_fini - Destroys a mock scheduler
 *
 * @sched: Scheduler to destroy
 *
 * To be used from the test cases once done with the scheduler.
 */
void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
{
	drm_sched_fini(&sched->base);
}
3285a993507STvrtko Ursulin
3295a993507STvrtko Ursulin /**
3305a993507STvrtko Ursulin * drm_mock_sched_advance - Advances the mock scheduler timeline
3315a993507STvrtko Ursulin *
3325a993507STvrtko Ursulin * @sched: Scheduler timeline to advance
3335a993507STvrtko Ursulin * @num: By how many jobs to advance
3345a993507STvrtko Ursulin *
3355a993507STvrtko Ursulin * Advancing the scheduler timeline by a number of seqnos will trigger
3365a993507STvrtko Ursulin * signalling of the hardware fences and unlinking the jobs from the internal
3375a993507STvrtko Ursulin * scheduler tracking.
3385a993507STvrtko Ursulin *
3395a993507STvrtko Ursulin * This can be used from test cases which want complete control of the simulated
3405a993507STvrtko Ursulin * job execution timing. For example submitting one job with no set duration
3415a993507STvrtko Ursulin * would never complete it before test cases advances the timeline by one.
3425a993507STvrtko Ursulin */
drm_mock_sched_advance(struct drm_mock_scheduler * sched,unsigned int num)3435a993507STvrtko Ursulin unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
3445a993507STvrtko Ursulin unsigned int num)
3455a993507STvrtko Ursulin {
3465a993507STvrtko Ursulin struct drm_mock_sched_job *job, *next;
3475a993507STvrtko Ursulin unsigned int found = 0;
3485a993507STvrtko Ursulin unsigned long flags;
3495a993507STvrtko Ursulin LIST_HEAD(signal);
3505a993507STvrtko Ursulin
3515a993507STvrtko Ursulin spin_lock_irqsave(&sched->lock, flags);
3525a993507STvrtko Ursulin if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
3535a993507STvrtko Ursulin sched->hw_timeline.cur_seqno))
3545a993507STvrtko Ursulin goto unlock;
3555a993507STvrtko Ursulin sched->hw_timeline.cur_seqno += num;
3565a993507STvrtko Ursulin list_for_each_entry_safe(job, next, &sched->job_list, link) {
3575a993507STvrtko Ursulin if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
3585a993507STvrtko Ursulin break;
3595a993507STvrtko Ursulin
3605a993507STvrtko Ursulin drm_mock_sched_job_complete(job);
3615a993507STvrtko Ursulin found++;
3625a993507STvrtko Ursulin }
3635a993507STvrtko Ursulin unlock:
3645a993507STvrtko Ursulin spin_unlock_irqrestore(&sched->lock, flags);
3655a993507STvrtko Ursulin
3665a993507STvrtko Ursulin return found;
3675a993507STvrtko Ursulin }
3685a993507STvrtko Ursulin
3695a993507STvrtko Ursulin MODULE_DESCRIPTION("DRM mock scheduler and tests");
3705a993507STvrtko Ursulin MODULE_LICENSE("GPL");
371