xref: /linux/drivers/gpu/drm/i915/selftests/scatterlist.c (revision be709d48329a500621d2a05835283150ae137b45)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "../i915_selftest.h"

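/* Arbitrary, non-zero starting pfn for the synthetic page ranges */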
#define PFN_BIAS (1 << 10)

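/* A scatterlist covering the synthetic, contiguous pfn range [start, end) */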
struct pfn_table {
	struct sg_table st;
	unsigned long start, end;
};

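/*
 * Returns how many pages to place in segment @n of a table that will hold
 * @count segments in total; @rnd feeds the randomised generators.
 */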
typedef unsigned int (*npages_fn_t)(unsigned long n,
				    unsigned long count,
				    struct rnd_state *rnd);

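/*
 * Walk the table with for_each_sg() and check that every segment starts at
 * the expected pfn and has the length predicted by the generator.
 */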
static noinline int expect_pfn_sg(struct pfn_table *pt,
				  npages_fn_t npages_fn,
				  struct rnd_state *rnd,
				  const char *who,
				  unsigned long timeout)
{
	struct scatterlist *sg;
	unsigned long pfn, n;

	pfn = pt->start;
	for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
		struct page *page = sg_page(sg);
		unsigned int npages = npages_fn(n, pt->st.nents, rnd);

		if (page_to_pfn(page) != pfn) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (sg->length != npages * PAGE_SIZE) {
			pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
			       __func__, who, npages * PAGE_SIZE, sg->length);
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn += npages;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

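/*
 * Walk the table page-by-page with for_each_sg_page() and check that the
 * pages form one consecutive pfn run from start to end.
 */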
static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
					    const char *who,
					    unsigned long timeout)
{
	struct sg_page_iter sgiter;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
		struct page *page = sg_page_iter_page(&sgiter);

		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

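/* Repeat the page-by-page check using i915's own for_each_sgt_page() */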
static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
				       const char *who,
				       unsigned long timeout)
{
	struct sgt_iter sgt;
	struct page *page;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sgt_page(page, sgt, &pt->st) {
		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

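/* Verify the same table with all three iteration styles */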
static int expect_pfn_sgtable(struct pfn_table *pt,
			      npages_fn_t npages_fn,
			      struct rnd_state *rnd,
			      const char *who,
			      unsigned long timeout)
{
	int err;

	err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sg_page_iter(pt, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sgtiter(pt, who, timeout);
	if (err)
		return err;

	return 0;
}

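/*
 * Segment-size generators: a single page per segment, growing and shrinking
 * runs, random lengths, and a random choice of 4K/64K/2M sized chunks.
 */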
static unsigned int one(unsigned long n,
			unsigned long count,
			struct rnd_state *rnd)
{
	return 1;
}

static unsigned int grow(unsigned long n,
			 unsigned long count,
			 struct rnd_state *rnd)
{
	return n + 1;
}

static unsigned int shrink(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return count - n;
}

static unsigned int random(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
					   unsigned long count,
					   struct rnd_state *rnd)
{
	/* 4K, 64K, 2M */
	static unsigned int page_count[] = {
		BIT(12) >> PAGE_SHIFT,
		BIT(16) >> PAGE_SHIFT,
		BIT(21) >> PAGE_SHIFT,
	};

	return page_count[(prandom_u32_state(rnd) % 3)];
}

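/*
 * True only if the memmap is contiguous across the range, i.e. walking
 * @npages struct pages on from @first by plain pointer arithmetic lands
 * exactly on @last.
 */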
static inline bool page_contiguous(struct page *first,
				   struct page *last,
				   unsigned long npages)
{
	return first + npages == last;
}

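/*
 * Build a table of @count segments (in an allocation sized for @max entries)
 * covering a contiguous run of synthetic pfns, with each segment's size chosen
 * by @npages_fn. Returns @alloc_error if the sg allocation fails, or -ENOSPC
 * if the memmap is not contiguous over the requested range.
 */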
static int alloc_table(struct pfn_table *pt,
		       unsigned long count, unsigned long max,
		       npages_fn_t npages_fn,
		       struct rnd_state *rnd,
		       int alloc_error)
{
	struct scatterlist *sg;
	unsigned long n, pfn;

	if (sg_alloc_table(&pt->st, max,
			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
		return alloc_error;

	/* count * PAGE_SIZE must not overflow sg->length (an unsigned int) */
	GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

	/* Construct a table where each scatterlist entry contains a different
	 * number of pages. The idea is to check that we can iterate the
	 * individual pages from inside the coalesced lists.
	 */
	pt->start = PFN_BIAS;
	pfn = pt->start;
	sg = pt->st.sgl;
	for (n = 0; n < count; n++) {
		unsigned long npages = npages_fn(n, count, rnd);

		/* Nobody expects the Sparse Memmap! */
		if (!page_contiguous(pfn_to_page(pfn),
				     pfn_to_page(pfn + npages),
				     npages)) {
			sg_free_table(&pt->st);
			return -ENOSPC;
		}

		if (n)
			sg = sg_next(sg);
		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
		GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
		GEM_BUG_ON(sg->offset != 0);

		pfn += npages;
	}
	sg_mark_end(sg);
	pt->st.nents = n;
	pt->end = pfn;

	return 0;
}

static const npages_fn_t npages_funcs[] = {
	one,
	grow,
	shrink,
	random,
	random_page_size_pages,
	NULL,
};

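/*
 * Allocate tables of various sizes (powers of two with a prime exponent, plus
 * or minus one entry) with every segment-size generator and verify the result
 * with all three iterators. Once at least one chained table has been built,
 * further allocation failures are skipped as -ENOSPC rather than reported.
 */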
static int igt_sg_alloc(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max_order = 20; /* approximating a 4GiB object */
	struct rnd_state prng;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max_order) {
		unsigned long size = BIT(prime);
		int offset;

		for (offset = -1; offset <= 1; offset++) {
			unsigned long sz = size + offset;
			const npages_fn_t *npages;
			struct pfn_table pt;
			int err;

			for (npages = npages_funcs; *npages; npages++) {
				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = alloc_table(&pt, sz, sz, *npages, &prng,
						  alloc_error);
				if (err == -ENOSPC)
					break;
				if (err)
					return err;

				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = expect_pfn_sgtable(&pt, *npages, &prng,
							 "sg_alloc_table",
							 end_time);
				sg_free_table(&pt.st);
				if (err)
					return err;
			}
		}

		/* Test at least one continuation before accepting oom */
		if (size > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

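/*
 * Allocate tables with more entries than are used (prime in-use counts within
 * a PAGE_SIZE-entry allocation) and check that i915_sg_trim() repacks them to
 * exactly the used entries while the contents still iterate correctly.
 */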
static int igt_sg_trim(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max = PAGE_SIZE; /* not prime! */
	struct pfn_table pt;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max) {
		const npages_fn_t *npages;
		int err;

		for (npages = npages_funcs; *npages; npages++) {
			struct rnd_state prng;

			prandom_seed_state(&prng, i915_selftest.random_seed);
			err = alloc_table(&pt, prime, max, *npages, &prng,
					  alloc_error);
			if (err == -ENOSPC)
				break;
			if (err)
				return err;

			if (i915_sg_trim(&pt.st)) {
				if (pt.st.orig_nents != prime ||
				    pt.st.nents != prime) {
					pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
					       pt.st.nents, pt.st.orig_nents, prime);
					err = -EINVAL;
				} else {
					prandom_seed_state(&prng,
							   i915_selftest.random_seed);
					err = expect_pfn_sgtable(&pt,
								 *npages, &prng,
								 "i915_sg_trim",
								 end_time);
				}
			}
			sg_free_table(&pt.st);
			if (err)
				return err;
		}

		/* Test at least one continuation before accepting oom */
		if (prime > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

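/* Entry point, run from the i915 mock (no hardware required) selftests */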
int scatterlist_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sg_alloc),
		SUBTEST(igt_sg_trim),
	};

	return i915_subtests(tests, NULL);
}