xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_stolen.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
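
/*
 * Illustrative sketch of how stolen memory is consumed: a caller elsewhere
 * in the driver might allocate a stolen-backed object through the helper
 * defined later in this file, e.g.
 *
 *	obj = i915_gem_object_create_stolen(i915, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * No further setup is needed by the caller; the object pins its backing
 * pages as part of creation (see __i915_gem_object_create_stolen() below).
 */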

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
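
/*
 * Sketch of raw-range use of the two helpers above (the size, alignment and
 * error handling are assumptions for illustration only): components that
 * want a chunk of stolen without a GEM object wrapper reserve a drm_mm node
 * and translate it to a physical address by adding i915->dsm.start, e.g.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, 4096);
 *	if (err)
 *		return err;
 *	// physical address of the reservation: i915->dsm.start + node.start
 *	i915_gem_stolen_remove_node(i915, &node);
 */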

static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: some BIOSes wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

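/*
 * Rough layout computed by i915_gem_init_stolen() below (a sketch derived
 * from the code, not to scale):
 *
 *	dsm.start                      reserved_base          stolen_top
 *	|<------- stolen_usable_size ------->|<---- reserved_total ---->|
 *	        (managed by mm.stolen)
 *
 * dsm_reserved covers [reserved_base, reserved_base + reserved_size), which
 * may end before stolen_top; only reserved_base matters for the usable size.
 * The drm_mm allocator itself is initialised over [0, stolen_usable_size),
 * i.e. node offsets are relative to dsm.start rather than absolute addresses.
 */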
static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
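
/*
 * Note on the table built above (an observation drawn from the code, not new
 * behaviour): the single scatterlist entry carries only a DMA address and
 * length; no struct pages are ever assigned, so consumers must stick to
 * sg_dma_address()/sg_dma_len() and cannot rely on sg_page() for
 * stolen-backed objects.
 */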

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}
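
/*
 * Illustrative usage sketch for the function above: the typical caller is the
 * display takeover path that inherits the BIOS/GOP framebuffer. It knows the
 * framebuffer's offset and size within stolen from the programmed hardware
 * state and wraps that range in a GEM object instead of allocating anything
 * new, roughly:
 *
 *	obj = i915_gem_object_create_stolen_for_preallocated(i915,
 *							      fb_base,
 *							      fb_size);
 *	if (IS_ERR(obj))
 *		// range already claimed or misaligned; give up on takeover
 *		return false;
 *
 * fb_base and fb_size stand in for wherever the caller keeps the BIOS
 * framebuffer's base and size; treat the names as illustrative.
 */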