// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};
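
/*
 * intel_region_map translates the driver-internal INTEL_REGION_* index
 * (the bit position tested via HAS_REGION() below) into the (class,
 * instance) pair exposed to userspace, e.g.
 * intel_region_map[INTEL_REGION_LMEM_0] -> { INTEL_MEMORY_LOCAL, 0 },
 * which uAPI addresses via struct drm_i915_gem_memory_class_instance.
 */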

static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = get_random_u32_below(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io_start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}
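
/*
 * Note that __iopagetest() only samples three bytes of the page it just
 * filled (the first byte, a random byte, and the last byte); memchr_inv()
 * returns non-NULL iff any sample differs from the value written, which
 * is taken as evidence that the backing store behind the iomap is missing
 * or faulty.
 */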

static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io_start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io_start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}
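
/*
 * Writing each pattern and then its complement (~val[i]) drives every bit
 * of the page both low and high, and the 0xa5/0xc3/0xf0 values vary which
 * bit positions flip, so stuck bits are caught rather than just wholly
 * absent pages.
 */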

static resource_size_t random_page(resource_size_t last)
{
	/*
	 * get_random_u32_below() limits us to the low 44b (16TiB with 4KiB
	 * pages), which should suffice for a spot check. Widen the page
	 * index before shifting so offsets above 4GiB aren't truncated.
	 */
	return (resource_size_t)get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (mem->io_size < PAGE_SIZE)
		return 0;

	last = mem->io_size - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}
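
/*
 * The exhaustive test_all walk is reached via the i915.memtest module
 * parameter (see intel_memory_region_memtest() below); a minimal sketch,
 * assuming the module is loaded by hand:
 *
 *	modprobe i915 memtest=1
 *
 * Without it, only the first/last/random three-page spot check runs, and
 * only on CONFIG_DRM_I915_DEBUG_GEM builds.
 */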

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}
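
/*
 * A minimal usage sketch (illustrative only; the surrounding ioctl
 * plumbing, including the args pointer, is an assumption, not shown in
 * this file): resolving the pair that userspace supplies in a
 * struct drm_i915_gem_memory_class_instance:
 *
 *	struct intel_memory_region *mr;
 *
 *	mr = intel_memory_region_lookup(i915, args->region.memory_class,
 *					args->region.memory_instance);
 *	if (!mr)
 *		return -EINVAL;
 */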

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}
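
/*
 * A minimal sketch of a caller (illustrative; reserved_base/reserved_size
 * are assumptions): carving a range out of a region before the buddy
 * allocator can hand it to object placement:
 *
 *	err = intel_memory_region_reserve(mem, reserved_base, reserved_size);
 *	if (err)
 *		return err;
 *
 * i915_ttm_buddy_man_reserve() marks the range as permanently allocated
 * in the TTM buddy manager backing the region.
 */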

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io_start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->io_size = io_size;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}
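
/*
 * A minimal sketch of a region setup helper (illustrative; the concrete
 * values, name and ops table are assumptions, not this file's contract):
 *
 *	mem = intel_memory_region_create(i915, region_start, region_size,
 *					 PAGE_SIZE, io_start, io_size,
 *					 INTEL_MEMORY_LOCAL, 0,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return mem;
 *	intel_memory_region_set_name(mem, "lmem");
 *
 * Note that the constructor runs ops->init() and the memtest before
 * returning, so a region that fails its I/O sanity check never becomes
 * visible to the rest of the driver.
 */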

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}
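
/*
 * i915_ttm_buddy_man_avail() reports its counts in pages, hence the
 * PAGE_SHIFT conversions above; system memory has no buddy manager here,
 * so the whole region is reported as available.
 */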

void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region type=%d: %d\n",
				type, err);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
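
/*
 * The region id doubles as the bit position tested by HAS_REGION(), so
 * i915->mm.regions[] stays indexed by INTEL_REGION_*. A minimal sketch of
 * walking the probed regions afterwards (the loop body is illustrative):
 *
 *	struct intel_memory_region *mr;
 *	int id;
 *
 *	for_each_memory_region(mr, i915, id)
 *		drm_dbg(&i915->drm, "region %s probed\n", mr->name);
 */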

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif