// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <drm/drm_print.h>
#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM_0] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

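/*
 * Write a single test pattern over one page of the iomap and read back the
 * first byte, a randomly chosen byte and the last byte to confirm the value
 * sticks. Logs the offending offset and returns -EINVAL on a mismatch.
 */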
static int __iopagetest(struct intel_memory_region *mem,
			u8 __iomem *va, int pagesize,
			u8 value, resource_size_t offset,
			const void *caller)
{
	int byte = get_random_u32_below(pagesize);
	u8 result[3];

	memset_io(va, value, pagesize); /* or GPF! */
	wmb();

	result[0] = ioread8(va);
	result[1] = ioread8(va + byte);
	result[2] = ioread8(va + pagesize - 1);
	if (memchr_inv(result, value, sizeof(result))) {
		dev_err(mem->i915->drm.dev,
			"Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
			&mem->region, &mem->io.start, &offset, caller,
			value, result[0], result[1], result[2]);
		return -EINVAL;
	}

	return 0;
}

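/*
 * Exercise one page of the io region with a small set of test patterns and
 * their complements, mapping the page write-combined for the duration.
 */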
static int iopagetest(struct intel_memory_region *mem,
		      resource_size_t offset,
		      const void *caller)
{
	const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
	void __iomem *va;
	int err;
	int i;

	va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
	if (!va) {
		dev_err(mem->i915->drm.dev,
			"Failed to ioremap memory region [%pa + %pa] for %ps\n",
			&mem->io.start, &offset, caller);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
		if (err)
			break;

		err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
		if (err)
			break;
	}

	iounmap(va);
	return err;
}

static resource_size_t random_page(resource_size_t last)
{
	/* Limited to low 44b (16TiB), but should suffice for a spot check */
	return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

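/*
 * Sanity check the io window of the region: always probe the first page, the
 * last page and one randomly chosen page in between; when test_all is set
 * (as it is via the i915.memtest module parameter) probe every page instead.
 */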
static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	if (resource_size(&mem->io) < PAGE_SIZE)
		return 0;

	last = resource_size(&mem->io) - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */

	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}

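/**
 * intel_memory_region_lookup - Find a memory region by class and instance
 * @i915: The i915 device.
 * @class: The memory region class, e.g. INTEL_MEMORY_LOCAL.
 * @instance: The instance of that class.
 *
 * Return: The matching region, or NULL if no such region is registered.
 */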
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

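/**
 * intel_memory_region_by_type - Find the first region of a given type
 * @i915: The i915 device.
 * @mem_type: The memory type to look for.
 *
 * Return: The first region whose type matches @mem_type, or NULL if none.
 */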
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

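/**
 * intel_memory_type_is_local - Report whether a memory type is device-local
 * @mem_type: The memory type to check.
 *
 * Return: true for INTEL_MEMORY_LOCAL and INTEL_MEMORY_STOLEN_LOCAL,
 * false otherwise.
 */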
bool intel_memory_type_is_local(enum intel_memory_type mem_type)
{
	switch (mem_type) {
	case INTEL_MEMORY_LOCAL:
	case INTEL_MEMORY_STOLEN_LOCAL:
		return true;
	default:
		return false;
	}
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}

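/**
 * intel_memory_region_debug - Dump debug information about a region
 * @mr: The region to describe.
 * @printer: The drm printer to emit the output to.
 *
 * Prints the TTM resource manager state when one is attached, otherwise
 * just the total size of the region.
 */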
void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

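/*
 * Run the io memory test on regions that expose an io window, either
 * unconditionally on CONFIG_DRM_I915_DEBUG_GEM builds or when the
 * i915.memtest module parameter is set (which also switches from a spot
 * check to testing every page).
 */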
static int intel_memory_region_memtest(struct intel_memory_region *mem,
				       void *caller)
{
	struct drm_i915_private *i915 = mem->i915;
	int err = 0;

	if (!mem->io.start)
		return 0;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
		err = iomemtest(mem, i915->params.memtest, caller);

	return err;
}

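/**
 * intel_memory_type_str - Return a human-readable name for a memory type
 * @type: The memory type.
 *
 * Return: A constant string such as "system" or "local", or "unknown" for
 * unrecognised types.
 */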
const char *intel_memory_type_str(enum intel_memory_type type)
{
	switch (type) {
	case INTEL_MEMORY_SYSTEM:
		return "system";
	case INTEL_MEMORY_LOCAL:
		return "local";
	case INTEL_MEMORY_STOLEN_LOCAL:
		return "stolen-local";
	case INTEL_MEMORY_STOLEN_SYSTEM:
		return "stolen-system";
	default:
		return "unknown";
	}
}

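/**
 * intel_memory_region_create - Create and initialize a memory region
 * @i915: The i915 device.
 * @start: Start of the region's address range.
 * @size: Size of the region.
 * @min_page_size: Minimum page size of the region.
 * @io_start: Start of the CPU-accessible io window, if any.
 * @io_size: Size of the CPU-accessible io window.
 * @type: The memory type (class) of the region.
 * @instance: The instance of that type.
 * @ops: The region ops, including the optional init and release hooks.
 *
 * Allocates the region, runs the ops->init() hook if provided and then the
 * io memory test before handing the region back to the caller.
 *
 * Return: A pointer to the new region, or an ERR_PTR() on failure.
 */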
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   resource_size_t io_size,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = DEFINE_RES_MEM(start, size);
	mem->io = DEFINE_RES_MEM(io_start, io_size);
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->type = type;
	mem->instance = instance;

	snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
		 intel_memory_type_str(type), instance);

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
	if (err)
		goto err_release;

	return mem;

err_release:
	if (mem->ops->release)
		mem->ops->release(mem);
err_free:
	kfree(mem);
	return ERR_PTR(err);
}

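/**
 * intel_memory_region_set_name - Set the printable name of a region
 * @mem: The region to name.
 * @fmt: printf-style format string.
 */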
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

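/**
 * intel_memory_region_avail - Report available space in a region
 * @mr: The region to query.
 * @avail: Returns the available space in bytes.
 * @visible_avail: Returns the CPU-visible available space in bytes.
 *
 * For local memory the numbers come from the TTM buddy manager, converted
 * from pages to bytes; for all other regions both values are simply the
 * total size of the region.
 */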
void intel_memory_region_avail(struct intel_memory_region *mr,
			       u64 *avail, u64 *visible_avail)
{
	if (mr->type == INTEL_MEMORY_LOCAL) {
		i915_ttm_buddy_man_avail(mr->region_private,
					 avail, visible_avail);
		*avail <<= PAGE_SHIFT;
		*visible_avail <<= PAGE_SHIFT;
	} else {
		*avail = mr->total;
		*visible_avail = mr->total;
	}
}

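/**
 * intel_memory_region_destroy - Tear down a memory region
 * @mem: The region to destroy.
 *
 * Calls the region's release hook, warns if objects are still present on
 * the region and frees the region unless the release hook reported an
 * error.
 */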
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int ret = 0;

	if (mem->ops->release)
		ret = mem->ops->release(mem);

	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);
	if (!ret)
		kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

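/**
 * intel_memory_regions_hw_probe - Probe and set up all memory regions
 * @i915: The i915 device.
 *
 * Walks the region table, sets up each region present on the device and
 * logs the result. On failure every region created so far is released
 * again.
 *
 * Return: 0 on success, negative error code on failure.
 */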
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, i))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		if (mem) { /* Skip on non-fatal errors */
			mem->id = i;
			i915->mm.regions[i] = mem;
		}
	}

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = i915->mm.regions[i];
		u64 region_size, io_size;

		if (!mem)
			continue;

		region_size = resource_size(&mem->region) >> 20;
		io_size = resource_size(&mem->io) >> 20;

		if (resource_size(&mem->io))
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
				mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
		else
			drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
				mem->id, mem->name, region_size, &mem->region);
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

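/**
 * intel_memory_regions_driver_release - Release all memory regions
 * @i915: The i915 device.
 *
 * Destroys every region previously set up by intel_memory_regions_hw_probe().
 */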
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_destroy(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif