// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/dma-resv.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_kunit_helpers.h"

#define BO_SIZE		SZ_8K

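/*
 * On PREEMPT_RT kernels a ww_mutex is built on top of an rt_mutex, so the
 * deadlock test below has to pick the matching primitive when it grabs the
 * base lock directly, underneath the ww_mutex API.
 */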
#ifdef CONFIG_PREEMPT_RT
#define ww_mutex_base_lock(b)		rt_mutex_lock(b)
#else
#define ww_mutex_base_lock(b)		mutex_lock(b)
#endif

struct ttm_bo_test_case {
	const char *description;
	bool interruptible;
	bool no_wait;
};

static const struct ttm_bo_test_case ttm_bo_reserved_cases[] = {
	{
		.description = "Cannot be interrupted and sleeps",
		.interruptible = false,
		.no_wait = false,
	},
	{
		.description = "Cannot be interrupted, locks straight away",
		.interruptible = false,
		.no_wait = true,
	},
	{
		.description = "Can be interrupted, sleeps",
		.interruptible = true,
		.no_wait = false,
	},
};

static void ttm_bo_init_case_desc(const struct ttm_bo_test_case *t,
				  char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_bo_reserve, ttm_bo_reserved_cases, ttm_bo_init_case_desc);

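/*
 * Expect a plain ttm_bo_reserve() with no ww_acquire_ctx ticket to succeed
 * on an uncontended BO for every interruptible/no_wait combination.
 */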
static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
{
	const struct ttm_bo_test_case *params = test->param_value;
	struct ttm_buffer_object *bo;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
	KUNIT_ASSERT_EQ(test, err, 0);

	dma_resv_unlock(bo->base.resv);
}

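/*
 * Expect ttm_bo_reserve() with no_wait set to bail out with -EBUSY when
 * the reservation lock is already held.
 */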
static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	bool interruptible = false;
	bool no_wait = true;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	/* Let's lock it beforehand */
	dma_resv_lock(bo->base.resv, NULL);

	err = ttm_bo_reserve(bo, interruptible, no_wait, NULL);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_EQ(test, err, -EBUSY);
}

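/*
 * A no_wait reservation cannot be combined with a ww_acquire_ctx ticket;
 * expect ttm_bo_reserve() to reject the combination with -EBUSY.
 */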
static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ww_acquire_ctx ctx;
	bool interruptible = false;
	bool no_wait = true;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
	KUNIT_ASSERT_EQ(test, err, -EBUSY);

	ww_acquire_fini(&ctx);
}

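/*
 * Reserving the same BO twice with the same ticket is a caller bug;
 * expect the second ttm_bo_reserve() to report it with -EALREADY.
 */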
static void ttm_bo_reserve_double_resv(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct ww_acquire_ctx ctx;
	bool interruptible = false;
	bool no_wait = false;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);

	dma_resv_unlock(bo->base.resv);
	ww_acquire_fini(&ctx);

	KUNIT_ASSERT_EQ(test, err, -EALREADY);
}

/*
 * A test case heavily inspired by ww_test_edeadlk_normal(). It injects
 * a deadlock by manipulating the sequence number of the context that holds
 * the dma_resv lock of bo2, so that the other context is "wounded" and has
 * to back off (indicated by -EDEADLK). The subtest checks if
 * ttm_bo_reserve() properly propagates that error.
 */
static void ttm_bo_reserve_deadlock(struct kunit *test)
{
	struct ttm_buffer_object *bo1, *bo2;
	struct ww_acquire_ctx ctx1, ctx2;
	bool interruptible = false;
	bool no_wait = false;
	int err;

	bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	ww_acquire_init(&ctx1, &reservation_ww_class);
	ww_mutex_base_lock(&bo2->base.resv->lock.base);

	/* The deadlock will be caught by WW mutex, don't warn about it */
	lock_release(&bo2->base.resv->lock.base.dep_map, 1);

	bo2->base.resv->lock.ctx = &ctx2;
	ctx2 = ctx1;
	ctx2.stamp--;	/* Make the context holding the lock younger */

	err = ttm_bo_reserve(bo1, interruptible, no_wait, &ctx1);
	KUNIT_ASSERT_EQ(test, err, 0);

	err = ttm_bo_reserve(bo2, interruptible, no_wait, &ctx1);
	KUNIT_ASSERT_EQ(test, err, -EDEADLK);

	dma_resv_unlock(bo1->base.resv);
	ww_acquire_fini(&ctx1);
}

#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
struct signal_timer {
	struct timer_list timer;
	struct ww_acquire_ctx *ctx;
};

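/* Timer callback: send SIGTERM to the task that owns the acquire ctx */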
static void signal_for_ttm_bo_reserve(struct timer_list *t)
{
	struct signal_timer *s_timer = from_timer(s_timer, t, timer);
	struct task_struct *task = s_timer->ctx->task;

	do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
}

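/*
 * Thread body: arm a timer that will signal this thread, then attempt an
 * interruptible reservation that is expected to sleep until the signal
 * interrupts it.
 */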
static int threaded_ttm_bo_reserve(void *arg)
{
	struct ttm_buffer_object *bo = arg;
	struct signal_timer s_timer;
	struct ww_acquire_ctx ctx;
	bool interruptible = true;
	bool no_wait = false;
	int err;

	ww_acquire_init(&ctx, &reservation_ww_class);

	/* Prepare a signal that will interrupt the reservation attempt */
	timer_setup_on_stack(&s_timer.timer, &signal_for_ttm_bo_reserve, 0);
	s_timer.ctx = &ctx;

	/* mod_timer() takes an absolute expiry, so offset from jiffies */
	mod_timer(&s_timer.timer, jiffies + msecs_to_jiffies(100));

	err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);

	timer_delete_sync(&s_timer.timer);
	destroy_timer_on_stack(&s_timer.timer);

	ww_acquire_fini(&ctx);

	return err;
}

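/*
 * Hold the reservation lock from the test so the kthread's interruptible
 * ttm_bo_reserve() has to sleep, then let the timer's SIGTERM interrupt
 * it; expect the kthread to return -ERESTARTSYS.
 */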
static void ttm_bo_reserve_interrupted(struct kunit *test)
{
	struct ttm_buffer_object *bo;
	struct task_struct *task;
	int err;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");

	if (IS_ERR(task))
		KUNIT_FAIL(test, "Couldn't create ttm bo reserve task\n");

	/* Take a lock so the threaded reserve has to wait */
	mutex_lock(&bo->base.resv->lock.base);

	wake_up_process(task);
	msleep(20);
	err = kthread_stop(task);

	mutex_unlock(&bo->base.resv->lock.base);

	KUNIT_ASSERT_EQ(test, err, -ERESTARTSYS);
}
#endif /* IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST) */

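/*
 * After ttm_bo_unreserve(), the BO's resource is expected to sit at the
 * tail of the LRU list for its memory type and priority.
 */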
static void ttm_bo_unreserve_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	struct ttm_resource *res1, *res2;
	struct ttm_place *place;
	struct ttm_resource_manager *man;
	unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
	u32 mem_type = TTM_PL_SYSTEM;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->priority = bo_prio;

	err = ttm_resource_alloc(bo, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);

	bo->resource = res1;

	/* Add a dummy resource to populate the LRU */
	err = ttm_resource_alloc(bo, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unreserve(bo);

	man = ttm_manager_type(priv->ttm_dev, mem_type);
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);

	ttm_resource_free(bo, &res2);
	ttm_resource_free(bo, &res1);
}

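/*
 * Unreserving a pinned BO must keep its resources on the device's pinned
 * list instead of moving them back to an LRU.
 */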
static void ttm_bo_unreserve_pinned(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	struct ttm_resource *res1, *res2;
	struct ttm_place *place;
	u32 mem_type = TTM_PL_SYSTEM;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	place = ttm_place_kunit_init(test, mem_type, 0);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_pin(bo);

	err = ttm_resource_alloc(bo, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res1;

	/* Add a dummy resource to the pinned list */
	err = ttm_resource_alloc(bo, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);

	ttm_bo_unreserve(bo);
	KUNIT_ASSERT_EQ(test,
			list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);

	ttm_resource_free(bo, &res1);
	ttm_resource_free(bo, &res2);
}

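/*
 * Two BOs sharing one dma_resv are tracked by the same bulk move; after
 * ttm_bo_unreserve() the bulk move position for this memory type and
 * priority is expected to end with bo1's resource.
 */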
static void ttm_bo_unreserve_bulk(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo1, *bo2;
	struct ttm_resource *res1, *res2;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	struct dma_resv *resv;
	u32 mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, resv);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	dma_resv_init(resv);

	bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
	bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);

	dma_resv_lock(bo1->base.resv, NULL);
	ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
	dma_resv_unlock(bo1->base.resv);

	err = ttm_resource_alloc(bo1, place, &res1);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo1->resource = res1;

	dma_resv_lock(bo2->base.resv, NULL);
	ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
	dma_resv_unlock(bo2->base.resv);

	err = ttm_resource_alloc(bo2, place, &res2);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo2->resource = res2;

	err = ttm_bo_reserve(bo1, false, false, NULL);
	KUNIT_ASSERT_EQ(test, err, 0);
	ttm_bo_unreserve(bo1);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];
	KUNIT_ASSERT_PTR_EQ(test, res1, pos->last);

	ttm_resource_free(bo1, &res1);
	ttm_resource_free(bo2, &res2);

	dma_resv_fini(resv);
}

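/*
 * Expect a device-type BO with a populated TT to be released cleanly by
 * ttm_bo_put().
 */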
static void ttm_bo_put_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	u32 mem_type = TTM_PL_SYSTEM;
	int err;

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = ttm_bo_type_device;

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	dma_resv_lock(bo->base.resv, NULL);
	err = ttm_tt_create(bo, false);
	dma_resv_unlock(bo->base.resv);
	KUNIT_EXPECT_EQ(test, err, 0);

	ttm_bo_put(bo);
}

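/* A minimal dma_fence stub: just enough ops for dma_fence_init() */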
static const char *mock_name(struct dma_fence *f)
{
	return "kunit-ttm-bo-put";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

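/*
 * Release a BO that uses a shared, externally owned reservation object
 * carrying an already-signaled fence; ttm_bo_put() is expected to cope
 * with the external dma_resv.
 */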
static void ttm_bo_put_shared_resv(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct dma_resv *external_resv;
	struct dma_fence *fence;
	/* A dummy DMA fence lock */
	spinlock_t fence_lock;
	struct ttm_device *ttm_dev;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	external_resv = kunit_kzalloc(test, sizeof(*external_resv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, external_resv);

	dma_resv_init(external_resv);

	fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, fence);

	spin_lock_init(&fence_lock);
	dma_fence_init(fence, &mock_fence_ops, &fence_lock, 0, 0);

	dma_resv_lock(external_resv, NULL);
	dma_resv_reserve_fences(external_resv, 1);
	dma_resv_add_fence(external_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_unlock(external_resv);

	dma_fence_signal(fence);

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
	bo->type = ttm_bo_type_device;
	bo->base.resv = external_resv;

	ttm_bo_put(bo);
}

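/*
 * ttm_bo_pin() may be called repeatedly under the reservation lock;
 * expect pin_count to match the number of pins.
 */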
static void ttm_bo_pin_basic(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_device *ttm_dev;
	unsigned int no_pins = 3;
	int err;

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	for (int i = 0; i < no_pins; i++) {
		dma_resv_lock(bo->base.resv, NULL);
		ttm_bo_pin(bo);
		dma_resv_unlock(bo->base.resv);
	}

	KUNIT_ASSERT_EQ(test, bo->pin_count, no_pins);
}

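/*
 * Pinning removes a BO's resource from its bulk move (first/last become
 * NULL); unpinning is expected to put it back.
 */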
static void ttm_bo_pin_unpin_resource(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	u32 mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &lru_bulk_move);
	ttm_bo_pin(bo);
	dma_resv_unlock(bo->base.resv);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];

	KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_PTR_EQ(test, res, pos->last);
	KUNIT_ASSERT_EQ(test, bo->pin_count, 0);

	ttm_resource_free(bo, &res);
}

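/*
 * With a pin_count above one, a single ttm_bo_unpin() must only decrement
 * pin_count and keep the resource off the bulk move; only dropping the
 * last pin returns it.
 */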
static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
{
	struct ttm_test_devices *priv = test->priv;
	struct ttm_lru_bulk_move lru_bulk_move;
	struct ttm_lru_bulk_move_pos *pos;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	struct ttm_device *ttm_dev;
	struct ttm_place *place;
	u32 mem_type = TTM_PL_SYSTEM;
	unsigned int bo_priority = 0;
	int err;

	ttm_lru_bulk_move_init(&lru_bulk_move);

	place = ttm_place_kunit_init(test, mem_type, 0);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(priv, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	priv->ttm_dev = ttm_dev;

	bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);

	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	bo->resource = res;

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_set_bulk_move(bo, &lru_bulk_move);

	/* Multiple pins */
	ttm_bo_pin(bo);
	ttm_bo_pin(bo);

	dma_resv_unlock(bo->base.resv);

	pos = &lru_bulk_move.pos[mem_type][bo_priority];

	KUNIT_ASSERT_EQ(test, bo->pin_count, 2);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	KUNIT_ASSERT_EQ(test, bo->pin_count, 1);
	KUNIT_ASSERT_NULL(test, pos->first);
	KUNIT_ASSERT_NULL(test, pos->last);

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_unpin(bo);
	dma_resv_unlock(bo->base.resv);

	ttm_resource_free(bo, &res);
}

static struct kunit_case ttm_bo_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_bo_reserve_optimistic_no_ticket,
			 ttm_bo_reserve_gen_params),
	KUNIT_CASE(ttm_bo_reserve_locked_no_sleep),
	KUNIT_CASE(ttm_bo_reserve_no_wait_ticket),
	KUNIT_CASE(ttm_bo_reserve_double_resv),
#if IS_BUILTIN(CONFIG_DRM_TTM_KUNIT_TEST)
	KUNIT_CASE(ttm_bo_reserve_interrupted),
#endif
	KUNIT_CASE(ttm_bo_reserve_deadlock),
	KUNIT_CASE(ttm_bo_unreserve_basic),
	KUNIT_CASE(ttm_bo_unreserve_pinned),
	KUNIT_CASE(ttm_bo_unreserve_bulk),
	KUNIT_CASE(ttm_bo_put_basic),
	KUNIT_CASE(ttm_bo_put_shared_resv),
	KUNIT_CASE(ttm_bo_pin_basic),
	KUNIT_CASE(ttm_bo_pin_unpin_resource),
	KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
	{}
};

static struct kunit_suite ttm_bo_test_suite = {
	.name = "ttm_bo",
	.init = ttm_test_devices_init,
	.exit = ttm_test_devices_fini,
	.test_cases = ttm_bo_test_cases,
};

kunit_test_suites(&ttm_bo_test_suite);

MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
MODULE_LICENSE("GPL and additional rights");