1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3 * Copyright © 2023 Intel Corporation
4 */
5
6 #include <linux/export.h>
7
8 #include <drm/ttm/ttm_tt.h>
9
10 #include "ttm_kunit_helpers.h"
11
/*
 * Placements used by the mock eviction handlers below. Each is a
 * single-entry, whole-range (fpfn == lpfn == 0) placement flagged as a
 * fallback.
 */
static const struct ttm_place sys_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_FALLBACK,
};

static const struct ttm_place mock1_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK1,
	.flags = TTM_PL_FLAG_FALLBACK,
};

static const struct ttm_place mock2_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK2,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Evict to system memory (used for VRAM and SYSTEM BOs). */
static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_place,
};

/* Evict to MOCK1 — the "bad" destination used by bad_evict_flags(). */
static struct ttm_placement bad_placement = {
	.num_placement = 1,
	.placement = &mock1_place,
};

/* Evict to MOCK2 (used for TT BOs). */
static struct ttm_placement mock_placement = {
	.num_placement = 1,
	.placement = &mock2_place,
};
47
/*
 * Minimal ttm_tt_create hook: allocate a bare struct ttm_tt backed by
 * cached pages. Returns NULL on allocation or init failure, which TTM
 * treats as -ENOMEM.
 */
static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc_obj(*tt);
	if (!tt)
		return NULL;

	/*
	 * ttm_tt_init() can fail; don't hand TTM a half-initialized tt
	 * and don't leak the allocation.
	 */
	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}
57
/* ttm_tt_destroy hook matching ttm_tt_simple_create(): plain kfree(). */
static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	kfree(ttm);
}
62
/*
 * Mock implementation of ttm_device_funcs.move.
 *
 * Paths covered:
 *  - no resource yet, or a system BO with no ttm pages bound: null move;
 *  - VRAM -> SYSTEM: request an intermediate hop through TT by filling in
 *    *hop and returning -EMULTIHOP;
 *  - SYSTEM <-> TT: null move (no data copy needed);
 *  - everything else: fall back to ttm_bo_move_memcpy().
 */
static int mock_move(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_mem,
		     struct ttm_place *hop)
{
	struct ttm_resource *old_mem = bo->resource;

	/* Nothing to copy from: just adopt the new resource. */
	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	/* VRAM -> SYSTEM must go through a temporary TT placement first. */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		hop->fpfn = 0;
		hop->lpfn = 0;
		return -EMULTIHOP;
	}

	/* SYSTEM <-> TT transitions only change placement bookkeeping. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
94
/*
 * Mock evict_flags: choose the eviction destination from the BO's current
 * domain. VRAM and SYSTEM evict to the system domain, TT evicts to MOCK2.
 * For MOCK1 the placement is deliberately left untouched so the object is
 * purged instead of moved.
 */
static void mock_evict_flags(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement)
{
	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
	case TTM_PL_SYSTEM:
		*placement = sys_placement;
		break;
	case TTM_PL_TT:
		*placement = mock_placement;
		break;
	case TTM_PL_MOCK1:
		/* Purge objects coming from this domain */
		break;
	}
}
111
/*
 * Deliberately unhelpful evict_flags: always evict into TTM_PL_MOCK1,
 * regardless of where the BO lives. Presumably used by tests exercising
 * eviction failure paths — confirm against the callers.
 */
static void bad_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = bad_placement;
}
117
ttm_device_kunit_init_with_funcs(struct ttm_test_devices * priv,struct ttm_device * ttm,unsigned int alloc_flags,struct ttm_device_funcs * funcs)118 static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
119 struct ttm_device *ttm,
120 unsigned int alloc_flags,
121 struct ttm_device_funcs *funcs)
122 {
123 struct drm_device *drm = priv->drm;
124 int err;
125
126 err = ttm_device_init(ttm, funcs, drm->dev,
127 drm->anon_inode->i_mapping,
128 drm->vma_offset_manager,
129 alloc_flags);
130
131 return err;
132 }
133
/*
 * Default mock device vtable: simple tt create/destroy, the mock move
 * handler, stock eviction_valuable and the well-behaved evict_flags.
 */
struct ttm_device_funcs ttm_dev_funcs = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = mock_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
142
/*
 * Initialize @ttm as a mock TTM device using the default ttm_dev_funcs
 * vtable. Returns 0 on success or a negative errno from ttm_device_init().
 */
int ttm_device_kunit_init(struct ttm_test_devices *priv,
			  struct ttm_device *ttm,
			  unsigned int alloc_flags)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
						&ttm_dev_funcs);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
151
/*
 * Same as ttm_dev_funcs except for the deliberately broken evict_flags,
 * for tests that exercise eviction failure handling.
 */
struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bad_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
160
/*
 * Initialize @ttm with the bad-eviction vtable and no alloc flags.
 * Returns 0 on success or a negative errno from ttm_device_init().
 */
int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
				    struct ttm_device *ttm)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, 0,
						&ttm_dev_funcs_bad_evict);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
168
/*
 * Allocate and minimally initialize a test BO of @size bytes on the
 * devices in @devs. If @obj is non-NULL it is used as the BO's
 * reservation object. The BO is kunit-managed; assertions abort the
 * test on allocation or GEM-init failure.
 */
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
					    struct ttm_test_devices *devs,
					    size_t size,
					    struct dma_resv *obj)
{
	struct ttm_buffer_object *bo;
	int ret;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	/* Start from a zeroed GEM base, then optionally override the resv. */
	bo->base = (struct drm_gem_object){ };
	if (obj)
		bo->base.resv = obj;

	ret = drm_gem_object_init(devs->drm, &bo->base, size);
	KUNIT_ASSERT_EQ(test, ret, 0);

	bo->bdev = devs->ttm_dev;
	bo->destroy = dummy_ttm_bo_destroy;
	kref_init(&bo->kref);

	return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
197
/*
 * Allocate a kunit-managed ttm_place with the given memory type and
 * flags (fpfn/lpfn stay zero, i.e. whole range).
 */
struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
{
	struct ttm_place *pl = kunit_kzalloc(test, sizeof(*pl), GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, pl);

	pl->flags = flags;
	pl->mem_type = mem_type;

	return pl;
}
EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
211
/*
 * BO destroy callback for test BOs: only release the GEM base. The BO
 * memory itself is kunit-managed and freed by KUnit.
 */
void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	drm_gem_object_release(&bo->base);
}
EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
217
/*
 * Allocate the common test fixture: a KUnit-managed struct device plus a
 * DRM device (DRIVER_GEM) bound to it. No TTM device is created here, so
 * devs->ttm_dev stays NULL. Assertions abort the test on failure.
 */
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
	struct ttm_test_devices *devs;

	devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, devs);

	devs->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);

	/*
	 * All-ones coherent DMA mask so dma_alloc_coherent() mappings work,
	 * enabling the ttm_pool_alloc() tests.
	 */
	devs->dev->coherent_dma_mask = -1;

	devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
							sizeof(*devs->drm), 0,
							DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);

	return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
239
/*
 * Like ttm_test_devices_basic(), but additionally creates and initializes
 * a mock TTM device (default ttm_dev_funcs vtable, no alloc flags) and
 * stores it in devs->ttm_dev.
 */
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
{
	struct ttm_test_devices *devs;
	struct ttm_device *ttm_dev;
	int err;

	devs = ttm_test_devices_basic(test);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(devs, ttm_dev, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	devs->ttm_dev = ttm_dev;

	return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_all);
259
/*
 * Tear down a fixture from ttm_test_devices_basic()/_all(): finalize the
 * TTM device if one was created, then release the helper-allocated
 * device. kunit-managed allocations are freed by KUnit itself.
 */
void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
{
	if (devs->ttm_dev)
		ttm_device_fini(devs->ttm_dev);

	drm_kunit_helper_free_device(test, devs->dev);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_put);
268
ttm_test_devices_init(struct kunit * test)269 int ttm_test_devices_init(struct kunit *test)
270 {
271 struct ttm_test_devices *priv;
272
273 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
274 KUNIT_ASSERT_NOT_NULL(test, priv);
275
276 priv = ttm_test_devices_basic(test);
277 test->priv = priv;
278
279 return 0;
280 }
281 EXPORT_SYMBOL_GPL(ttm_test_devices_init);
282
ttm_test_devices_all_init(struct kunit * test)283 int ttm_test_devices_all_init(struct kunit *test)
284 {
285 struct ttm_test_devices *priv;
286
287 priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
288 KUNIT_ASSERT_NOT_NULL(test, priv);
289
290 priv = ttm_test_devices_all(test);
291 test->priv = priv;
292
293 return 0;
294 }
295 EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
296
/* KUnit exit hook matching ttm_test_devices_init()/_all_init(). */
void ttm_test_devices_fini(struct kunit *test)
{
	ttm_test_devices_put(test, test->priv);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
302
303 MODULE_DESCRIPTION("TTM KUnit test helper functions");
304 MODULE_LICENSE("GPL and additional rights");
305