xref: /linux/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 #include <drm/ttm/ttm_tt.h>
6 
7 #include "ttm_kunit_helpers.h"
8 
9 static const struct ttm_place sys_place = {
10 	.fpfn = 0,
11 	.lpfn = 0,
12 	.mem_type = TTM_PL_SYSTEM,
13 	.flags = TTM_PL_FLAG_FALLBACK,
14 };
15 
16 static const struct ttm_place mock1_place = {
17 	.fpfn = 0,
18 	.lpfn = 0,
19 	.mem_type = TTM_PL_MOCK1,
20 	.flags = TTM_PL_FLAG_FALLBACK,
21 };
22 
23 static const struct ttm_place mock2_place = {
24 	.fpfn = 0,
25 	.lpfn = 0,
26 	.mem_type = TTM_PL_MOCK2,
27 	.flags = TTM_PL_FLAG_FALLBACK,
28 };
29 
30 static struct ttm_placement sys_placement = {
31 	.num_placement = 1,
32 	.placement = &sys_place,
33 };
34 
35 static struct ttm_placement bad_placement = {
36 	.num_placement = 1,
37 	.placement = &mock1_place,
38 };
39 
40 static struct ttm_placement mock_placement = {
41 	.num_placement = 1,
42 	.placement = &mock2_place,
43 };
44 
/*
 * Minimal ttm_tt_create() backend callback: a plain kzalloc'ed ttm_tt.
 *
 * Fixes over the previous version: the kzalloc() result was passed to
 * ttm_tt_init() without a NULL check (NULL deref on allocation failure),
 * and ttm_tt_init()'s error return was ignored (leaking @tt and handing
 * an uninitialized TT back to the caller).
 *
 * Returns the new TT on success, NULL on failure (the TTM core treats a
 * NULL return from ttm_tt_create as -ENOMEM).
 */
static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}
54 
/* Counterpart to ttm_tt_simple_create(): release the kzalloc'ed TT. */
static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	kfree(ttm);
}
59 
/*
 * Mock implementation of the ttm_device_funcs.move() callback.
 *
 * Handles the cases the TTM tests exercise:
 *  - a BO without a resource, or an idle system BO with no TT: adopt
 *    @new_mem without copying;
 *  - VRAM -> SYSTEM: request a multihop bounce through TT;
 *  - SYSTEM <-> TT: no data movement needed, just swap the resource;
 *  - everything else: plain memcpy move.
 */
static int mock_move(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_mem,
		     struct ttm_place *hop)
{
	struct ttm_resource *old_mem = bo->resource;
	bool sys_to_tt, tt_to_sys;

	/* Nothing to migrate: just install the new resource. */
	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	/* VRAM -> SYSTEM must bounce through a temporary TT placement. */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		*hop = (struct ttm_place) {
			.fpfn = 0,
			.lpfn = 0,
			.mem_type = TTM_PL_TT,
			.flags = TTM_PL_FLAG_TEMPORARY,
		};
		return -EMULTIHOP;
	}

	sys_to_tt = old_mem->mem_type == TTM_PL_SYSTEM &&
		    new_mem->mem_type == TTM_PL_TT;
	tt_to_sys = old_mem->mem_type == TTM_PL_TT &&
		    new_mem->mem_type == TTM_PL_SYSTEM;
	if (sys_to_tt || tt_to_sys) {
		/* System and TT share the same backing pages here. */
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
91 
mock_evict_flags(struct ttm_buffer_object * bo,struct ttm_placement * placement)92 static void mock_evict_flags(struct ttm_buffer_object *bo,
93 			     struct ttm_placement *placement)
94 {
95 	switch (bo->resource->mem_type) {
96 	case TTM_PL_VRAM:
97 	case TTM_PL_SYSTEM:
98 		*placement = sys_placement;
99 		break;
100 	case TTM_PL_TT:
101 		*placement = mock_placement;
102 		break;
103 	case TTM_PL_MOCK1:
104 		/* Purge objects coming from this domain */
105 		break;
106 	}
107 }
108 
bad_evict_flags(struct ttm_buffer_object * bo,struct ttm_placement * placement)109 static void bad_evict_flags(struct ttm_buffer_object *bo,
110 			    struct ttm_placement *placement)
111 {
112 	*placement = bad_placement;
113 }
114 
/*
 * Initialize @ttm against the test DRM device in @priv with the given
 * callback table. Returns 0 on success or the ttm_device_init() error.
 */
static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
					    struct ttm_device *ttm,
					    bool use_dma_alloc,
					    bool use_dma32,
					    struct ttm_device_funcs *funcs)
{
	struct drm_device *drm = priv->drm;

	return ttm_device_init(ttm, funcs, drm->dev,
			       drm->anon_inode->i_mapping,
			       drm->vma_offset_manager,
			       use_dma_alloc, use_dma32);
}
131 
132 struct ttm_device_funcs ttm_dev_funcs = {
133 	.ttm_tt_create = ttm_tt_simple_create,
134 	.ttm_tt_destroy = ttm_tt_simple_destroy,
135 	.move = mock_move,
136 	.eviction_valuable = ttm_bo_eviction_valuable,
137 	.evict_flags = mock_evict_flags,
138 };
139 EXPORT_SYMBOL_GPL(ttm_dev_funcs);
140 
/* Initialize a test TTM device with the default (well-behaved) callbacks. */
int ttm_device_kunit_init(struct ttm_test_devices *priv,
			  struct ttm_device *ttm,
			  bool use_dma_alloc,
			  bool use_dma32)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
						use_dma32, &ttm_dev_funcs);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
150 
151 struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
152 	.ttm_tt_create = ttm_tt_simple_create,
153 	.ttm_tt_destroy = ttm_tt_simple_destroy,
154 	.move = mock_move,
155 	.eviction_valuable = ttm_bo_eviction_valuable,
156 	.evict_flags = bad_evict_flags,
157 };
158 EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
159 
/* Initialize a test TTM device that mis-handles eviction (bad_evict_flags). */
int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
				    struct ttm_device *ttm,
				    bool use_dma_alloc,
				    bool use_dma32)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
						use_dma32, &ttm_dev_funcs_bad_evict);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
169 
/*
 * Allocate and initialize a test buffer object of @size bytes.
 *
 * @obj, if non-NULL, is installed as the BO's reservation object before
 * drm_gem_object_init() runs; otherwise the GEM object's embedded resv is
 * used. (NOTE(review): assumes drm_gem_object_init() keeps a pre-set
 * resv rather than overwriting it — confirm against the GEM core.)
 *
 * Memory is kunit-managed; failures abort the test via KUNIT_ASSERT.
 *
 * Fix: dropped the local `struct drm_gem_object gem_obj = { }` and the
 * `bo->base = gem_obj` assignment — a dead store, since @bo comes from
 * kunit_kzalloc() and is therefore already zero-initialized.
 */
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
					    struct ttm_test_devices *devs,
					    size_t size,
					    struct dma_resv *obj)
{
	struct ttm_buffer_object *bo;
	int err;

	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	if (obj)
		bo->base.resv = obj;

	err = drm_gem_object_init(devs->drm, &bo->base, size);
	KUNIT_ASSERT_EQ(test, err, 0);

	bo->bdev = devs->ttm_dev;
	bo->destroy = dummy_ttm_bo_destroy;

	kref_init(&bo->kref);

	return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
198 
/*
 * Allocate a kunit-managed ttm_place covering the whole address space
 * (fpfn/lpfn left zero) with the given memory type and flags.
 */
struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
{
	struct ttm_place *place = kunit_kzalloc(test, sizeof(*place),
						GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, place);

	place->mem_type = mem_type;
	place->flags = flags;

	return place;
}
EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
212 
dummy_ttm_bo_destroy(struct ttm_buffer_object * bo)213 void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
214 {
215 	drm_gem_object_release(&bo->base);
216 }
217 EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
218 
ttm_test_devices_basic(struct kunit * test)219 struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
220 {
221 	struct ttm_test_devices *devs;
222 
223 	devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
224 	KUNIT_ASSERT_NOT_NULL(test, devs);
225 
226 	devs->dev = drm_kunit_helper_alloc_device(test);
227 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
228 
229 	/* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
230 	devs->dev->coherent_dma_mask = -1;
231 
232 	devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
233 							sizeof(*devs->drm), 0,
234 							DRIVER_GEM);
235 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
236 
237 	return devs;
238 }
239 EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
240 
ttm_test_devices_all(struct kunit * test)241 struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
242 {
243 	struct ttm_test_devices *devs;
244 	struct ttm_device *ttm_dev;
245 	int err;
246 
247 	devs = ttm_test_devices_basic(test);
248 
249 	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
250 	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
251 
252 	err = ttm_device_kunit_init(devs, ttm_dev, false, false);
253 	KUNIT_ASSERT_EQ(test, err, 0);
254 
255 	devs->ttm_dev = ttm_dev;
256 
257 	return devs;
258 }
259 EXPORT_SYMBOL_GPL(ttm_test_devices_all);
260 
ttm_test_devices_put(struct kunit * test,struct ttm_test_devices * devs)261 void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
262 {
263 	if (devs->ttm_dev)
264 		ttm_device_fini(devs->ttm_dev);
265 
266 	drm_kunit_helper_free_device(test, devs->dev);
267 }
268 EXPORT_SYMBOL_GPL(ttm_test_devices_put);
269 
ttm_test_devices_init(struct kunit * test)270 int ttm_test_devices_init(struct kunit *test)
271 {
272 	struct ttm_test_devices *priv;
273 
274 	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
275 	KUNIT_ASSERT_NOT_NULL(test, priv);
276 
277 	priv = ttm_test_devices_basic(test);
278 	test->priv = priv;
279 
280 	return 0;
281 }
282 EXPORT_SYMBOL_GPL(ttm_test_devices_init);
283 
ttm_test_devices_all_init(struct kunit * test)284 int ttm_test_devices_all_init(struct kunit *test)
285 {
286 	struct ttm_test_devices *priv;
287 
288 	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
289 	KUNIT_ASSERT_NOT_NULL(test, priv);
290 
291 	priv = ttm_test_devices_all(test);
292 	test->priv = priv;
293 
294 	return 0;
295 }
296 EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
297 
ttm_test_devices_fini(struct kunit * test)298 void ttm_test_devices_fini(struct kunit *test)
299 {
300 	ttm_test_devices_put(test, test->priv);
301 }
302 EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
303 
304 MODULE_DESCRIPTION("TTM KUnit test helper functions");
305 MODULE_LICENSE("GPL and additional rights");
306