// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prandom.h>

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

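/*
 * Map driver-internal region ids (INTEL_REGION_*) to the memory
 * class/instance pairs exposed through the uAPI.
 */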
static const struct {
        u16 class;
        u16 instance;
} intel_region_map[] = {
        [INTEL_REGION_SMEM] = {
                .class = INTEL_MEMORY_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_LMEM_0] = {
                .class = INTEL_MEMORY_LOCAL,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_SMEM] = {
                .class = INTEL_MEMORY_STOLEN_SYSTEM,
                .instance = 0,
        },
        [INTEL_REGION_STOLEN_LMEM] = {
                .class = INTEL_MEMORY_STOLEN_LOCAL,
                .instance = 0,
        },
};

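/*
 * Fill one page of the iomap with @value and read back three sample bytes
 * (the first, the last and a random offset) to confirm the backing store is
 * actually reachable through the mapping.
 */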
static int __iopagetest(struct intel_memory_region *mem,
                        u8 __iomem *va, int pagesize,
                        u8 value, resource_size_t offset,
                        const void *caller)
{
        int byte = get_random_u32_below(pagesize);
        u8 result[3];

        memset_io(va, value, pagesize); /* or GPF! */
        wmb();

        result[0] = ioread8(va);
        result[1] = ioread8(va + byte);
        result[2] = ioread8(va + pagesize - 1);
        if (memchr_inv(result, value, sizeof(result))) {
                dev_err(mem->i915->drm.dev,
                        "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n",
                        &mem->region, &mem->io.start, &offset, caller,
                        value, result[0], result[1], result[2]);
                return -EINVAL;
        }

        return 0;
}

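/*
 * Map a single page of the region's io window at @offset and exercise it
 * with a handful of test patterns and their complements.
 */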
static int iopagetest(struct intel_memory_region *mem,
                      resource_size_t offset,
                      const void *caller)
{
        const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 };
        void __iomem *va;
        int err;
        int i;

        va = ioremap_wc(mem->io.start + offset, PAGE_SIZE);
        if (!va) {
                dev_err(mem->i915->drm.dev,
                        "Failed to ioremap memory region [%pa + %pa] for %ps\n",
                        &mem->io.start, &offset, caller);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(val); i++) {
                err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller);
                if (err)
                        break;

                err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller);
                if (err)
                        break;
        }

        iounmap(va);
        return err;
}

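/* Pick a random page-aligned offset below @last for a spot check. */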
static resource_size_t random_page(resource_size_t last)
{
        /* Limited to low 44b (16TiB), but should suffice for a spot check */
        return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
}

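/*
 * Sanity check the io window: test every page if @test_all is set,
 * otherwise just the first, the last and one random page.
 */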
static int iomemtest(struct intel_memory_region *mem,
                     bool test_all,
                     const void *caller)
{
        resource_size_t last, page;
        int err;

        if (resource_size(&mem->io) < PAGE_SIZE)
                return 0;

        last = resource_size(&mem->io) - PAGE_SIZE;

        /*
         * Quick test to check read/write access to the iomap (backing store).
         *
         * Write a byte, read it back. If the iomapping fails, we expect
         * a GPF preventing further execution. If the backing store does not
         * exist, the read back will return garbage. We check a couple of pages,
         * the first and last of the specified region to confirm the backing
         * store + iomap does cover the entire memory region; and we check
         * a random offset within as a quick spot check for bad memory.
         */

        if (test_all) {
                for (page = 0; page <= last; page += PAGE_SIZE) {
                        err = iopagetest(mem, page, caller);
                        if (err)
                                return err;
                }
        } else {
                err = iopagetest(mem, 0, caller);
                if (err)
                        return err;

                err = iopagetest(mem, last, caller);
                if (err)
                        return err;

                err = iopagetest(mem, random_page(last), caller);
                if (err)
                        return err;
        }

        return 0;
}

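/* Find the region registered for the given memory class/instance pair. */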
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
                           u16 class, u16 instance)
{
        struct intel_memory_region *mr;
        int id;

        /* XXX: consider maybe converting to an rb tree at some point */
        for_each_memory_region(mr, i915, id) {
                if (mr->type == class && mr->instance == instance)
                        return mr;
        }

        return NULL;
}

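/* Return the first region of the requested type, or NULL if none exists. */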
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
                            enum intel_memory_type mem_type)
{
        struct intel_memory_region *mr;
        int id;

        for_each_memory_region(mr, i915, id)
                if (mr->type == mem_type)
                        return mr;

        return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size)
{
        struct ttm_resource_manager *man = mem->region_private;

        GEM_BUG_ON(mem->is_range_manager);

        return i915_ttm_buddy_man_reserve(man, offset, size);
}

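/* Dump region usage, deferring to the TTM resource manager when present. */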
void intel_memory_region_debug(struct intel_memory_region *mr,
                               struct drm_printer *printer)
{
        drm_printf(printer, "%s: ", mr->name);

        if (mr->region_private)
                ttm_resource_manager_debug(mr->region_private, printer);
        else
                drm_printf(printer, "total:%pa bytes\n", &mr->total);
}

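/*
 * Run the io memory test on a newly created region, either when built with
 * CONFIG_DRM_I915_DEBUG_GEM or when the memtest module parameter is set
 * (the latter forces a full scan of every page).
 */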
static int intel_memory_region_memtest(struct intel_memory_region *mem,
                                       void *caller)
{
        struct drm_i915_private *i915 = mem->i915;
        int err = 0;

        if (!mem->io.start)
                return 0;

        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest)
                err = iomemtest(mem, i915->params.memtest, caller);

        return err;
}

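/* Human-readable class name, used to construct the region's uabi_name. */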
static const char *region_type_str(u16 type)
{
        switch (type) {
        case INTEL_MEMORY_SYSTEM:
                return "system";
        case INTEL_MEMORY_LOCAL:
                return "local";
        case INTEL_MEMORY_STOLEN_LOCAL:
                return "stolen-local";
        case INTEL_MEMORY_STOLEN_SYSTEM:
                return "stolen-system";
        default:
                return "unknown";
        }
}

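/*
 * Allocate and initialise a new memory region, running the ops->init hook
 * and an optional io memory test before handing it back. Returns the region
 * on success or an ERR_PTR() on failure.
 */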
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
                           resource_size_t start,
                           resource_size_t size,
                           resource_size_t min_page_size,
                           resource_size_t io_start,
                           resource_size_t io_size,
                           u16 type,
                           u16 instance,
                           const struct intel_memory_region_ops *ops)
{
        struct intel_memory_region *mem;
        int err;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        mem->i915 = i915;
        mem->region = DEFINE_RES_MEM(start, size);
        mem->io = DEFINE_RES_MEM(io_start, io_size);
        mem->min_page_size = min_page_size;
        mem->ops = ops;
        mem->total = size;
        mem->type = type;
        mem->instance = instance;

        snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
                 region_type_str(type), instance);

        mutex_init(&mem->objects.lock);
        INIT_LIST_HEAD(&mem->objects.list);

        if (ops->init) {
                err = ops->init(mem);
                if (err)
                        goto err_free;
        }

        err = intel_memory_region_memtest(mem, (void *)_RET_IP_);
        if (err)
                goto err_release;

        return mem;

err_release:
        if (mem->ops->release)
                mem->ops->release(mem);
err_free:
        kfree(mem);
        return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
                                  const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
        va_end(ap);
}

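/*
 * Report the available and CPU-visible available space in bytes; the buddy
 * manager tracks local memory in pages, hence the PAGE_SHIFT conversion.
 */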
void intel_memory_region_avail(struct intel_memory_region *mr,
                               u64 *avail, u64 *visible_avail)
{
        if (mr->type == INTEL_MEMORY_LOCAL) {
                i915_ttm_buddy_man_avail(mr->region_private,
                                         avail, visible_avail);
                *avail <<= PAGE_SHIFT;
                *visible_avail <<= PAGE_SHIFT;
        } else {
                *avail = mr->total;
                *visible_avail = mr->total;
        }
}

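/*
 * Tear down a region. If the release hook reports failure, the struct is
 * intentionally left allocated rather than freed.
 */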
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
        int ret = 0;

        if (mem->ops->release)
                ret = mem->ops->release(mem);

        GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
        mutex_destroy(&mem->objects.lock);
        if (!ret)
                kfree(mem);
}

/* Global memory region registration -- only slight layer inversions! */

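/*
 * Probe and register every memory region supported by the device, then log
 * a summary of each region that was successfully set up.
 */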
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = ERR_PTR(-ENODEV);
                u16 type, instance;

                if (!HAS_REGION(i915, i))
                        continue;

                type = intel_region_map[i].class;
                instance = intel_region_map[i].instance;
                switch (type) {
                case INTEL_MEMORY_SYSTEM:
                        if (IS_DGFX(i915))
                                mem = i915_gem_ttm_system_setup(i915, type,
                                                                instance);
                        else
                                mem = i915_gem_shmem_setup(i915, type,
                                                           instance);
                        break;
                case INTEL_MEMORY_STOLEN_LOCAL:
                        mem = i915_gem_stolen_lmem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                case INTEL_MEMORY_STOLEN_SYSTEM:
                        mem = i915_gem_stolen_smem_setup(i915, type, instance);
                        if (!IS_ERR(mem))
                                i915->mm.stolen_region = mem;
                        break;
                default:
                        continue;
                }

                if (IS_ERR(mem)) {
                        err = PTR_ERR(mem);
                        drm_err(&i915->drm,
                                "Failed to setup region(%d) type=%d\n",
                                err, type);
                        goto out_cleanup;
                }

                if (mem) { /* Skip on non-fatal errors */
                        mem->id = i;
                        i915->mm.regions[i] = mem;
                }
        }

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = i915->mm.regions[i];
                u64 region_size, io_size;

                if (!mem)
                        continue;

                region_size = resource_size(&mem->region) >> 20;
                io_size = resource_size(&mem->io) >> 20;

                if (resource_size(&mem->io))
                        drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
                                mem->id, mem->name, region_size, &mem->region, io_size, &mem->io);
                else
                        drm_dbg(&i915->drm, "Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
                                mem->id, mem->name, region_size, &mem->region);
        }

        return 0;

out_cleanup:
        intel_memory_regions_driver_release(i915);
        return err;
}

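/* Destroy every registered region and clear the per-device region table. */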
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *region =
                        fetch_and_zero(&i915->mm.regions[i]);

                if (region)
                        intel_memory_region_destroy(region);
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif