xref: /linux/drivers/gpu/drm/i915/selftests/scatterlist.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "i915_selftest.h"
#include "i915_utils.h"

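/*
 * First pfn handed to the fabricated tables below; a non-zero bias,
 * presumably so that the synthetic pfns stay well clear of page 0.
 */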
#define PFN_BIAS (1 << 10)

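/*
 * A fabricated sg_table plus the pfn range [start, end) that its entries
 * are expected to cover, in order and without gaps.
 */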
struct pfn_table {
	struct sg_table st;
	unsigned long start, end;
};

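/*
 * Chunk-size callback: given chunk index n out of count chunks, return how
 * many pages that chunk should span (e.g. one() always returns 1, grow()
 * returns n + 1). The verification helpers replay the callback with an
 * identically seeded rnd state, so each implementation must be a pure
 * function of its arguments and the prng sequence.
 */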
typedef unsigned int (*npages_fn_t)(unsigned long n,
				    unsigned long count,
				    struct rnd_state *rnd);

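/*
 * Check the table entry by entry with for_each_sg(): each scatterlist must
 * start at the expected pfn and carry the chunk length recomputed by
 * replaying npages_fn with the same prng state used to build the table.
 */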
static noinline int expect_pfn_sg(struct pfn_table *pt,
				  npages_fn_t npages_fn,
				  struct rnd_state *rnd,
				  const char *who,
				  unsigned long timeout)
{
	struct scatterlist *sg;
	unsigned long pfn, n;

	pfn = pt->start;
	for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
		struct page *page = sg_page(sg);
		unsigned int npages = npages_fn(n, pt->st.nents, rnd);

		if (page_to_pfn(page) != pfn) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (sg->length != npages * PAGE_SIZE) {
			pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
			       __func__, who, npages * PAGE_SIZE, sg->length);
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn += npages;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

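/*
 * Check the same table one page at a time with for_each_sg_page(), making
 * sure the pages come back in pfn order.
 */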
static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
					    const char *who,
					    unsigned long timeout)
{
	struct sg_page_iter sgiter;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
		struct page *page = sg_page_iter_page(&sgiter);

		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

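/*
 * Repeat the page-by-page check with i915's own for_each_sgt_page()
 * iterator.
 */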
static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
				       const char *who,
				       unsigned long timeout)
{
	struct sgt_iter sgt;
	struct page *page;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sgt_page(page, sgt, &pt->st) {
		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

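/*
 * Verify a fabricated table with all three iteration styles: per-entry
 * for_each_sg(), per-page for_each_sg_page() and i915's for_each_sgt_page().
 */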
static int expect_pfn_sgtable(struct pfn_table *pt,
			      npages_fn_t npages_fn,
			      struct rnd_state *rnd,
			      const char *who,
			      unsigned long timeout)
{
	int err;

	err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sg_page_iter(pt, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sgtiter(pt, who, timeout);
	if (err)
		return err;

	return 0;
}

static unsigned int one(unsigned long n,
			unsigned long count,
			struct rnd_state *rnd)
{
	return 1;
}

static unsigned int grow(unsigned long n,
			 unsigned long count,
			 struct rnd_state *rnd)
{
	return n + 1;
}

static unsigned int shrink(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return count - n;
}

static unsigned int random(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
					   unsigned long count,
					   struct rnd_state *rnd)
{
	/* 4K, 64K, 2M */
	static unsigned int page_count[] = {
		BIT(12) >> PAGE_SHIFT,
		BIT(16) >> PAGE_SHIFT,
		BIT(21) >> PAGE_SHIFT,
	};

	return page_count[(prandom_u32_state(rnd) % 3)];
}

static inline bool page_contiguous(struct page *first,
				   struct page *last,
				   unsigned long npages)
{
	return first + npages == last;
}

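/*
 * Build an sg_table with room for max entries and populate the first count
 * of them with synthetic, physically contiguous chunks starting at PFN_BIAS,
 * using npages_fn to pick each chunk size. Returns -ENOSPC if the fake pfn
 * range does not map onto contiguous struct pages (a sparse memmap), and
 * alloc_error if sg_alloc_table() itself fails.
 */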
static int alloc_table(struct pfn_table *pt,
		       unsigned long count, unsigned long max,
		       npages_fn_t npages_fn,
		       struct rnd_state *rnd,
		       int alloc_error)
{
	struct scatterlist *sg;
	unsigned long n, pfn;

	/* restricted by sg_alloc_table */
	if (overflows_type(max, unsigned int))
		return -E2BIG;

	if (sg_alloc_table(&pt->st, max,
			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
		return alloc_error;

	/* count must stay below 1 << 20 so count * PAGE_SIZE fits in sg->length */
	GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

	/* Construct a table where each scatterlist entry covers a different
	 * number of pages. The idea is to check that we can iterate the
	 * individual pages from inside the coalesced lists.
	 */
	pt->start = PFN_BIAS;
	pfn = pt->start;
	sg = pt->st.sgl;
	for (n = 0; n < count; n++) {
		unsigned long npages = npages_fn(n, count, rnd);

		/* Nobody expects the Sparse Memmap! */
		if (!page_contiguous(pfn_to_page(pfn),
				     pfn_to_page(pfn + npages),
				     npages)) {
			sg_free_table(&pt->st);
			return -ENOSPC;
		}

		if (n)
			sg = sg_next(sg);
		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
		GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
		GEM_BUG_ON(sg->offset != 0);

		pfn += npages;
	}
	sg_mark_end(sg);
	pt->st.nents = n;
	pt->end = pfn;

	return 0;
}

static const npages_fn_t npages_funcs[] = {
	one,
	grow,
	shrink,
	random,
	random_page_size_pages,
	NULL,
};

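/*
 * Exercise sg_alloc_table() with prime table sizes (and one entry either
 * side of each prime), filled using every chunk-size strategy in
 * npages_funcs, then verify all three iterators see the expected pages.
 * An allocation failure is fatal until at least one size needing an sg
 * chunk continuation (size > SG_MAX_SINGLE_ALLOC) has been attempted;
 * after that it is merely skipped.
 */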
static int igt_sg_alloc(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max_order = 20; /* approximating a 4GiB object */
	struct rnd_state prng;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max_order) {
		unsigned long size = BIT(prime);
		int offset;

		for (offset = -1; offset <= 1; offset++) {
			unsigned long sz = size + offset;
			const npages_fn_t *npages;
			struct pfn_table pt;
			int err;

			for (npages = npages_funcs; *npages; npages++) {
				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = alloc_table(&pt, sz, sz, *npages, &prng,
						  alloc_error);
				if (err == -ENOSPC)
					break;
				if (err)
					return err;

				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = expect_pfn_sgtable(&pt, *npages, &prng,
							 "sg_alloc_table",
							 end_time);
				sg_free_table(&pt.st);
				if (err)
					return err;
			}
		}

		/* Test at least one continuation before accepting oom */
		if (size > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

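/*
 * Allocate an oversized table (max entries) but only populate the first
 * prime entries; if i915_sg_trim() then reports it rebuilt the table, check
 * that orig_nents has shrunk to match nents and that the iterators still
 * return the expected pages.
 */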
static int igt_sg_trim(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max = PAGE_SIZE; /* not prime! */
	struct pfn_table pt;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max) {
		const npages_fn_t *npages;
		int err;

		for (npages = npages_funcs; *npages; npages++) {
			struct rnd_state prng;

			prandom_seed_state(&prng, i915_selftest.random_seed);
			err = alloc_table(&pt, prime, max, *npages, &prng,
					  alloc_error);
			if (err == -ENOSPC)
				break;
			if (err)
				return err;

			if (i915_sg_trim(&pt.st)) {
				if (pt.st.orig_nents != prime ||
				    pt.st.nents != prime) {
					pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
					       pt.st.nents, pt.st.orig_nents, prime);
					err = -EINVAL;
				} else {
					prandom_seed_state(&prng,
							   i915_selftest.random_seed);
					err = expect_pfn_sgtable(&pt,
								 *npages, &prng,
								 "i915_sg_trim",
								 end_time);
				}
			}
			sg_free_table(&pt.st);
			if (err)
				return err;
		}

		/* Test at least one continuation before accepting oom */
		if (prime > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

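/*
 * Entry point for the mock (no hardware required) scatterlist selftests;
 * this is expected to be invoked via the i915 mock selftest list rather
 * than called directly.
 */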
int scatterlist_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sg_alloc),
		SUBTEST(igt_sg_trim),
	};

	return i915_subtests(tests, NULL);
}