xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_stolen.c (revision b9d7eb6a31be296ca0af95641a23c4c758703c0a)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

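/*
 * A minimal caller-side sketch of carving a block out of stolen; the size,
 * alignment and range values here are hypothetical, not taken from this
 * file:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node_in_range(i915, &node, SZ_64K,
 *						   4096, 0, U64_MAX);
 *	if (err)
 *		return err;
 *
 * On success, node.start is an offset into the stolen region. Note that on
 * gen8+ the start offset is clamped to 4096 below, so page 0 of stolen is
 * never handed out (WaSkipStolenMemoryFirstPage).
 */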
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

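/*
 * Give a node back to the stolen allocator. This only unlinks the range;
 * the caller remains responsible for freeing the node's storage.
 */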
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

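/*
 * Sanitise the stolen range reported by firmware: on old platforms the GTT
 * may live inside stolen, in which case we keep the larger chunk on either
 * side of it, and on non-LMEM platforms we claim the range with
 * devm_request_mem_region() so that any conflict with another user shows up
 * here rather than as corruption later.
 */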
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * With stolen lmem, we don't need to check if the address range
	 * overlaps with the non-stolen system memory range, since lmem is local
	 * to the gpu.
	 */
	if (HAS_LMEM(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one at the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

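/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * STOLEN_RESERVED register into a (base, size) pair describing the portion
 * of stolen that is kept back for hardware/firmware use. When the register
 * is disabled or unparseable they return without touching *base and *size,
 * so the caller's defaults (an empty reservation at the top of stolen)
 * survive.
 */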
static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

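/*
 * Probe and carve up the stolen region: i915->dsm is populated from the
 * memory region, trimmed by i915_adjust_stolen(), the reserved chunk at the
 * top is subtracted, and a drm_mm allocator is initialised over what
 * remains. Note that bailing out early (vGPU active, DMAR on gen7 and
 * older, empty or inconsistent region) still returns 0: stolen is simply
 * left unusable and later allocations fail with -ENODEV.
 */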
static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&mem->region) == 0)
		return 0;

	i915->dsm = mem->region;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (GRAPHICS_VER(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* Exclude the reserved region from driver use */
	mem->region.end = reserved_base - 1;

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

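/*
 * With CONFIG_DRM_I915_DEBUG_GEM enabled, scribble a poison byte over a
 * stolen range so that use of uninitialised or freed stolen-backed objects
 * becomes visible. The CPU cannot address stolen directly, so each page is
 * temporarily bound at the GGTT error_capture scratch slot and written
 * through a write-combining io mapping.
 */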
static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

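/* Called when the object is destroyed; returns its range to the allocator. */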
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	ret = i915_gem_stolen_insert_node(i915, stolen, size,
					  mem->min_page_size);
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

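/*
 * A minimal usage sketch for allocating a buffer from stolen; the 1 MiB
 * size is an arbitrary example:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The object comes back with its backing pages already pinned by
 * __i915_gem_object_create_stolen() above.
 */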
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}

static int init_stolen_smem(struct intel_memory_region *mem)
{
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem);
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return -ENODEV;

	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				resource_size(&mem->region)))
		return -EIO;

	/*
	 * TODO: For stolen lmem we mostly just care about populating the dsm
	 * related bits and setting up the drm_mm allocator for the range.
	 * Perhaps split up i915_gem_init_stolen() for this.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		goto err_fini;

	return 0;

err_fini:
	io_mapping_fini(&mem->iomap);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

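/*
 * On discrete cards stolen sits at the top of local memory: GEN12_DSMBASE
 * holds its offset from the start of LMEM, everything from there to the end
 * of BAR 2 belongs to stolen, and io_start is the matching CPU-visible
 * address of that window.
 */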
struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t min_page_size;
	resource_size_t io_start;
	resource_size_t lmem_size;
	u64 lmem_base;

	lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
	if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
		return ERR_PTR(-ENODEV);

	lmem_size = pci_resource_len(pdev, 2) - lmem_base;
	io_start = pci_resource_start(pdev, 2) + lmem_base;

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, lmem_base, lmem_size,
					 min_page_size, io_start,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	/*
	 * TODO: consider creating a common helper to just print all the
	 * interesting stuff from intel_memory_region, which we can use for all
	 * our probed regions.
	 */

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;
	return mem;
}

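/*
 * Wrap an already-populated piece of stolen (typically the firmware
 * framebuffer we want to inherit for fbcon) in a GEM object. Nothing is
 * allocated: the exact offset is claimed with drm_mm_reserve_node() and the
 * existing contents are left untouched.
 */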
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.stolen_region;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret)
		goto err_free;

	obj = i915_gem_object_alloc();
	if (!obj) {
		ret = -ENOMEM;
		goto err_stolen;
	}

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_object_free;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ERR_PTR(ret);
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}