xref: /linux/drivers/gpu/drm/i915/intel_memory_region.c (revision bdd1a21b52557ea8f61d0a5dc2f77151b576eb70)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "intel_memory_region.h"
7 #include "i915_drv.h"
8 #include "i915_ttm_buddy_manager.h"
9 
/*
 * Static map from the driver's region id (INTEL_REGION_*) to the uAPI-visible
 * (class, instance) pair used by intel_memory_region_lookup().  Indexed by the
 * same id used for i915->mm.regions[] in intel_memory_regions_hw_probe().
 */
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};
31 
32 struct intel_memory_region *
33 intel_memory_region_lookup(struct drm_i915_private *i915,
34 			   u16 class, u16 instance)
35 {
36 	struct intel_memory_region *mr;
37 	int id;
38 
39 	/* XXX: consider maybe converting to an rb tree at some point */
40 	for_each_memory_region(mr, i915, id) {
41 		if (mr->type == class && mr->instance == instance)
42 			return mr;
43 	}
44 
45 	return NULL;
46 }
47 
48 struct intel_memory_region *
49 intel_memory_region_by_type(struct drm_i915_private *i915,
50 			    enum intel_memory_type mem_type)
51 {
52 	struct intel_memory_region *mr;
53 	int id;
54 
55 	for_each_memory_region(mr, i915, id)
56 		if (mr->type == mem_type)
57 			return mr;
58 
59 	return NULL;
60 }
61 
62 /**
63  * intel_memory_region_reserve - Reserve a memory range
64  * @mem: The region for which we want to reserve a range.
65  * @offset: Start of the range to reserve.
66  * @size: The size of the range to reserve.
67  *
68  * Return: 0 on success, negative error code on failure.
69  */
70 int intel_memory_region_reserve(struct intel_memory_region *mem,
71 				resource_size_t offset,
72 				resource_size_t size)
73 {
74 	struct ttm_resource_manager *man = mem->region_private;
75 
76 	GEM_BUG_ON(mem->is_range_manager);
77 
78 	return i915_ttm_buddy_man_reserve(man, offset, size);
79 }
80 
81 struct intel_memory_region *
82 intel_memory_region_create(struct drm_i915_private *i915,
83 			   resource_size_t start,
84 			   resource_size_t size,
85 			   resource_size_t min_page_size,
86 			   resource_size_t io_start,
87 			   u16 type,
88 			   u16 instance,
89 			   const struct intel_memory_region_ops *ops)
90 {
91 	struct intel_memory_region *mem;
92 	int err;
93 
94 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
95 	if (!mem)
96 		return ERR_PTR(-ENOMEM);
97 
98 	mem->i915 = i915;
99 	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
100 	mem->io_start = io_start;
101 	mem->min_page_size = min_page_size;
102 	mem->ops = ops;
103 	mem->total = size;
104 	mem->avail = mem->total;
105 	mem->type = type;
106 	mem->instance = instance;
107 
108 	mutex_init(&mem->objects.lock);
109 	INIT_LIST_HEAD(&mem->objects.list);
110 
111 	if (ops->init) {
112 		err = ops->init(mem);
113 		if (err)
114 			goto err_free;
115 	}
116 
117 	kref_init(&mem->kref);
118 	return mem;
119 
120 err_free:
121 	kfree(mem);
122 	return ERR_PTR(err);
123 }
124 
/*
 * Set the human-readable name of a region (shown in debug output),
 * formatted printf-style into the fixed-size mem->name buffer.
 * Output longer than sizeof(mem->name) - 1 is silently truncated
 * by vsnprintf().
 */
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}
134 
135 static void __intel_memory_region_destroy(struct kref *kref)
136 {
137 	struct intel_memory_region *mem =
138 		container_of(kref, typeof(*mem), kref);
139 
140 	if (mem->ops->release)
141 		mem->ops->release(mem);
142 
143 	mutex_destroy(&mem->objects.lock);
144 	kfree(mem);
145 }
146 
/* Take an additional reference on @mem; returns @mem for call chaining. */
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}
153 
/* Drop a reference; the last put runs __intel_memory_region_destroy(). */
void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
158 
159 /* Global memory region registration -- only slight layer inversions! */
160 
/*
 * Probe and set up every memory region the hardware advertises
 * (HAS_REGION), populating i915->mm.regions[].  On any setup failure all
 * regions created so far are released again.
 *
 * Return: 0 on success, negative error code on the first failing region.
 */
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		/* Region ids map one-to-one onto REGION_* feature bits. */
		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			/* Discrete GPUs manage system memory through TTM. */
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			/* Cache the stolen region for the stolen-object API. */
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			/* INTEL_MEMORY_LOCAL is set up elsewhere; skip here. */
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	/* Unwind the regions already registered in i915->mm.regions[]. */
	intel_memory_regions_driver_release(i915);
	return err;
}
215 
216 void intel_memory_regions_driver_release(struct drm_i915_private *i915)
217 {
218 	int i;
219 
220 	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
221 		struct intel_memory_region *region =
222 			fetch_and_zero(&i915->mm.regions[i]);
223 
224 		if (region)
225 			intel_memory_region_put(region);
226 	}
227 }
228 
229 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
230 #include "selftests/intel_memory_region.c"
231 #include "selftests/mock_region.c"
232 #endif
233