// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */
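
/*
 * Selftests for struct intel_memory_region. Everything here runs against a
 * mock device and a mock region, exercising the region allocation paths
 * without touching real hardware.
 */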

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_region.h"
#include "gem/selftests/mock_context.h"
#include "selftests/i915_random.h"

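/*
 * Unpin and release every object on @objects, then drain the freed-object
 * worker so that one subtest cannot leak allocations in @mem into the next.
 */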
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* Avoid polluting the memory region between tests */
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

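/*
 * Fill the region with objects sized at every prime multiple of the minimum
 * chunk size until the space is exhausted. Running out of space must surface
 * as -ENXIO (or -ENOMEM) only once the remaining free space is genuinely too
 * small for the requested object.
 */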
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = mem->mm.chunk_size;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

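/*
 * Create an object in @mem, pin its pages, and track it on @objects so that
 * close_objects() can clean everything up on the error paths.
 */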
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

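/* Undo igt_object_create(): unpin, drop the pages and free the object. */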
static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

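/*
 * Check I915_BO_ALLOC_CONTIGUOUS: minimum, maximum and random-sized objects
 * must each land in a single sg entry, and fragmenting the region must make
 * only over-sized contiguous allocations fail.
 */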
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s min object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->mm.pages->nents != 1) {
		pr_err("%s max object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
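	/* Pick a random page-aligned size in [PAGE_SIZE, total] */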
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (obj->mm.pages->nents != 1) {
		pr_err("%s object spans multiple sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

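	/*
	 * Allocate back-to-back 64K objects, alternating between two lists,
	 * then free one of the lists, leaving roughly:
	 *
	 *   | 64K obj | 64K hole | 64K obj | 64K hole | ...
	 */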
	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

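	/* Release the hole list: ~half of @mem is now free, in 64K pieces */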
	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= mem->mm.chunk_size);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

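/*
 * Entry point: create the mock device and a 2G mock region with a 4K minimum
 * chunk size, then run the subtests against it.
 */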
int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_put(mem);
out_unref:
	drm_dev_put(&i915->drm);
	return err;
}
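
/*
 * Usage note (assumption, based on how the other i915 mock selftests are
 * plumbed): this entry point is expected to be listed in
 * selftests/i915_mock_selftests.h and run by loading the module with
 * i915.mock_selftests=-1; the exact wiring depends on the tree revision.
 */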