// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/drm_print.h>
#include <drm/intel/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"

struct intel_stolen_node {
	struct drm_i915_private *i915;
	struct drm_mm_node node;
};

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

static int __i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
						  struct drm_mm_node *node, u64 size,
						  unsigned int alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

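/**
 * i915_gem_stolen_insert_node_in_range - reserve a range-constrained chunk of stolen memory
 * @node: the &intel_stolen_node to back with stolen memory
 * @size: size of the allocation in bytes
 * @alignment: required alignment of the allocation
 * @start: lower bound of the search range
 * @end: upper bound of the search range
 *
 * Returns: 0 on success, -ENODEV if stolen is unavailable, or another
 * negative error code if no suitable hole is found.
 */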
int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
					 unsigned int alignment, u64 start, u64 end)
{
	return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
						      size, alignment,
						      start, end);
}

static int __i915_gem_stolen_insert_node(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned int alignment)
{
	return __i915_gem_stolen_insert_node_in_range(i915, node,
						      size, alignment,
						      I915_GEM_STOLEN_BIAS,
						      U64_MAX);
}

int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
				unsigned int alignment)
{
	return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
}

static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915,
					  struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

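/**
 * i915_gem_stolen_remove_node - return a node's range to the stolen allocator
 * @node: the &intel_stolen_node to release
 *
 * The caller remains responsible for freeing @node itself, e.g. via
 * i915_gem_stolen_node_free().
 */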
void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
{
	__i915_gem_stolen_remove_node(node->i915, &node->node);
}

static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
	return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
}

static int adjust_stolen(struct drm_i915_private *i915,
			 struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	/*
	 * Make sure we don't clobber the GTT if it's within stolen memory.
	 *
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	if (!valid_stolen_size(i915, dsm))
		return -EINVAL;

	return 0;
}

static int request_smem_stolen(struct drm_i915_private *i915,
			       struct resource *dsm)
{
	struct resource *r;

	/*
	 * With stolen lmem, we don't need to request system memory for the
	 * address range since it's local to the gpu.
	 *
	 * Starting with MTL, on integrated devices the stolen memory is
	 * exposed via LMEMBAR and shall be considered similar to stolen lmem.
	 */
	if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but with an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

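/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED registers into a base/size pair describing the portion
 * of stolen memory reserved from driver use. They leave *base and *size
 * untouched when no reservation is enabled, so callers detect "no reserved
 * stolen" by priming them beforehand (see init_reserved_stolen()).
 */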
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On VLV, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.stolen.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	/* Wa_14019821291 */
	if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
		/*
		 * This workaround is primarily implemented by the BIOS.  We
		 * just need to figure out whether the BIOS has applied the
		 * workaround (meaning the programmed address falls within
		 * the DSM) and, if so, reserve that part of the DSM to
		 * prevent accidental reuse.  The DSM location should be just
		 * below the WOPCM.
		 */
		u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
							    MTL_GSCPSMI_BASEADDR_LSB,
							    MTL_GSCPSMI_BASEADDR_MSB);
		if (gscpsmi_base >= i915->dsm.stolen.start &&
		    gscpsmi_base < i915->dsm.stolen.end) {
			*base = gscpsmi_base;
			*size = i915->dsm.stolen.end - gscpsmi_base;
			return;
		}
	}

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915))
		/* *base was primed to stolen top, so subtract size to get the base */
		*base -= *size;
	else
		*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
}

/*
 * Initialize i915->dsm.reserved to contain the reserved space within the Data
 * Stolen Memory. This is a range at the top of DSM that is reserved, not to
 * be used by the driver, so it must be excluded from the region passed to the
 * allocator later. In the spec this is also referred to as WOPCM.
 *
 * Our expectation is that the reserved space is at the top of the stolen
 * region, as it has been the case for every platform, and *never* at the
 * bottom, so the calculation here can be simplified.
 */
static int init_reserved_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_size;
	int ret = 0;

	stolen_top = i915->dsm.stolen.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	if (GRAPHICS_VER(i915) >= 11) {
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 8) {
		if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 7) {
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
	} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
	}

	/* No reserved stolen */
	if (reserved_base == stolen_top)
		goto bail_out;

	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		ret = -EINVAL;
		goto bail_out;
	}

	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm.reserved, &i915->dsm.stolen);
		ret = -EINVAL;
		goto bail_out;
	}

	return 0;

bail_out:
	i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);

	return ret;
}

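/*
 * Validate the stolen region, carve out the firmware-reserved range at its
 * top and initialize the drm_mm allocator over what remains. Every failure
 * mode returns -ENOSPC so callers simply skip the region.
 */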
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return -ENOSPC;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return -ENOSPC;
	}

	if (adjust_stolen(i915, &mem->region))
		return -ENOSPC;

	if (request_smem_stolen(i915, &mem->region))
		return -ENOSPC;

	i915->dsm.stolen = mem->region;

	if (init_reserved_stolen(i915))
		return -ENOSPC;

	/* Exclude the reserved region from driver use */
	mem->region.end = i915->dsm.reserved.start - 1;
	mem->io = DEFINE_RES_MEM(mem->io.start,
				 min(resource_size(&mem->io),
				     resource_size(&mem->region)));

	i915->dsm.usable_size = resource_size(&mem->region);

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm.stolen) >> 10,
		(u64)i915->dsm.usable_size >> 10);

	if (i915->dsm.usable_size == 0)
		return -ENOSPC;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);

	/*
	 * Access to stolen lmem beyond a certain size on the MTL A0 stepping
	 * would crash the machine. Disable stolen lmem for userspace access
	 * by setting usable_size to zero.
	 */
	if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
		i915->dsm.usable_size = 0;

	return 0;
}

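/*
 * Debug-only helper: fill a stolen range with a poison byte by mapping it
 * page by page through the GGTT error-capture window. Compiled out unless
 * CONFIG_DRM_I915_DEBUG_GEM is enabled.
 */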
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(ggtt->vm.i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

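/*
 * Build the single-entry sg_table for a stolen object; on debug builds the
 * backing range is poisoned so stale contents are never mistaken for valid
 * state.
 */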
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	__i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

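/*
 * Wrap an already-reserved stolen range in a GEM object. The object takes
 * ownership of @stolen on success; on failure the caller must remove and
 * free the node itself (see _i915_gem_object_stolen_init()).
 */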
static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

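/*
 * Region init_object() hook: back @obj either with a caller-chosen offset
 * (preallocated, e.g. inherited from the BIOS framebuffer) or with a fresh
 * allocation from the stolen drm_mm.
 */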
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there is
	 * no way to ever access this memory from the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = __i915_gem_stolen_insert_node(i915, stolen, size,
						    mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	__i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

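/**
 * i915_gem_object_create_stolen - allocate a GEM object backed by stolen memory
 * @i915: the i915 device
 * @size: size of the object in bytes
 *
 * A minimal usage sketch (error handling shortened; the concrete callers
 * live elsewhere in the driver):
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_128K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * Returns: the new object, or an ERR_PTR() on failure.
 */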
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	int err;

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");

	return 0;
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return 0;

	err = i915_gem_init_stolen(mem);
	if (err) {
		drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n");
		return 0;
	}

	if (resource_size(&mem->io) &&
	    !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io))) {
		/* err is still 0 here, so report the iomap failure explicitly */
		err = -EIO;
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (resource_size(&mem->io))
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

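/*
 * Decode the GMS field of the GGC register into the stolen size in MB.
 * Encodings 0x0-0x4 count in 32MB units and 0xf0-0xfe in 4MB units, so
 * e.g. GMS=0x2 means 64MB and GMS=0xf0 means 4MB.
 */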
static int mtl_get_gms_size(struct intel_uncore *uncore)
{
	u16 ggc, gms;

	ggc = intel_uncore_read16(uncore, GGC);

	/* check GGMS, should be fixed 0x3 (8MB) */
	if ((ggc & GGMS_MASK) != GGMS_MASK)
		return -EIO;

	/* return valid GMS value, -EIO if invalid */
	gms = REG_FIELD_GET(GMS_MASK, ggc);
	switch (gms) {
	case 0x0 ... 0x04:
		return gms * 32;
	case 0xf0 ... 0xfe:
		return (gms - 0xf0 + 1) * 4;
	default:
		MISSING_CASE(gms);
		return -EIO;
	}
}

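/**
 * i915_gem_stolen_lmem_setup - probe and create the stolen local-memory region
 * @i915: the i915 device
 * @type: memory region type (INTEL_MEMORY_STOLEN_LOCAL)
 * @instance: memory region instance, must be 0
 *
 * Returns: a new memory region on success, NULL if stolen is unusable, or
 * an ERR_PTR() on probe failure.
 */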
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;
	int ret;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
		return ERR_PTR(-ENXIO);

	if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) {
		lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
		/*
		 * On MTL the DSM size is in the GGC register. MTL also uses
		 * offsets from GSMBASE in its PTEs, so i915 uses
		 * dsm_base = 8MB to set up the stolen region, since
		 * DSMBASE = GSMBASE + 8MB.
		 */
		ret = mtl_get_gms_size(uncore);
		if (ret < 0) {
			drm_err(&i915->drm, "invalid MTL GGC register setting\n");
			return ERR_PTR(ret);
		}

		dsm_base = SZ_8M;
		dsm_size = (resource_size_t)(ret * SZ_1M);

		GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
		GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
	} else {
		/* Use the DSM base address instead for stolen memory */
		dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		if (lmem_size < dsm_base) {
			drm_dbg(&i915->drm,
				"Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n",
				&lmem_size, &dsm_base);
			return NULL;
		}
		dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
	}

	if (i915_direct_stolen_access(i915)) {
		drm_dbg(&i915->drm, "Using direct DSM access\n");
		io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK;
		io_size = dsm_size;
	} else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

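/**
 * i915_gem_stolen_smem_setup - create the stolen system-memory region
 * @i915: the i915 device
 * @type: memory region type
 * @instance: memory region instance
 *
 * Wraps the firmware-reported intel_graphics_stolen_res in a memory region.
 *
 * Returns: a new memory region, or an ERR_PTR() on failure.
 */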
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;

	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}

bool i915_gem_stolen_initialized(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return drm_mm_initialized(&i915->mm.stolen);
}

u64 i915_gem_stolen_area_address(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return i915->dsm.stolen.start;
}

u64 i915_gem_stolen_area_size(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return resource_size(&i915->dsm.stolen);
}

u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
{
	struct drm_i915_private *i915 = node->i915;

	return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}

bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
{
	return drm_mm_node_allocated(&node->node);
}

u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node)
{
	return node->node.start;
}

u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
{
	return node->node.size;
}

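/*
 * Allocate an opaque stolen node handle; the struct layout is private to
 * this file, so users outside it go through the accessors above. Free with
 * i915_gem_stolen_node_free().
 */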
struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
{
	struct drm_i915_private *i915 = to_i915(drm);
	struct intel_stolen_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	node->i915 = i915;

	return node;
}

void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
{
	kfree(node);
}