xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_internal.c (revision c894ec016c9d0418dd832202225a8c64f450d71e)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

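/*
 * GFP modifiers for the backing store allocation: high-order attempts
 * are opportunistic and should fail fast and quietly, while the final
 * order-0 attempt may retry harder but must fail rather than invoke
 * the OOM killer.
 */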
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

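/*
 * Free every page block referenced by the table; each populated entry
 * holds a physically contiguous block of order get_order(sg->length).
 */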
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

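/*
 * Allocate the backing store as a set of physically contiguous blocks,
 * preferring the largest order the page allocator will grant and
 * falling back towards order-0 under memory pressure.
 */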
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages; /* restricted by sg_alloc_table */
	int max_order = MAX_ORDER;
	unsigned int max_segment;
	gfp_t gfp;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
		return -E2BIG;

	npages = obj->base.size >> PAGE_SHIFT;
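	/*
	 * Cap the allocation order so that no single block exceeds what
	 * the DMA layer can map as one scatterlist segment.
	 */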
	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
	max_order = min(max_order, get_order(max_segment));

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;

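	/*
	 * Greedily fill the table: fls(npages) - 1 is the largest order
	 * that still fits within the remaining page count, further capped
	 * by max_order.
	 */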
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

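		/*
		 * Step down an order on each failure; running out at
		 * order-0 means the allocation cannot be satisfied.
		 */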
		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
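	/*
	 * Clear the failed entry and terminate the table there, so that
	 * internal_free_pages() releases only the pages already allocated.
	 */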
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

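/*
 * Release the backing store: unmap it from the GTT, return the pages to
 * the allocator, and move the object back to the CPU write domain as its
 * contents have been discarded.
 */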
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

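/*
 * Internal objects are shrinkable because they are volatile: once
 * unpinned, their pages may be reaped by the shrinker at any time.
 */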
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

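	/*
	 * obj->base.size is a size_t, which may be narrower than the
	 * phys_addr_t used here (e.g. 32-bit with PAE), so reject sizes
	 * that cannot be represented.
	 */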
	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}
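
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed):
 * create an internal object, pin its pages for the duration of use, then
 * unpin and drop the reference:
 *
 *	struct drm_i915_gem_object *obj;
 *	int err;
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err) {
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 *
 *	... use the object; contents are valid only while pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *	i915_gem_object_put(obj);
 */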