xref: /linux/drivers/gpu/drm/i915/gt/intel_engine_user.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <linux/list.h>
7 #include <linux/list_sort.h>
8 #include <linux/llist.h>
9 
10 #include "i915_drv.h"
11 #include "intel_engine.h"
12 #include "intel_engine_user.h"
13 #include "intel_gt.h"
14 #include "uc/intel_guc_submission.h"
15 
16 struct intel_engine_cs *
17 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
18 {
19 	struct rb_node *p = i915->uabi_engines.rb_node;
20 
21 	while (p) {
22 		struct intel_engine_cs *it =
23 			rb_entry(p, typeof(*it), uabi_node);
24 
25 		if (class < it->uabi_class)
26 			p = p->rb_left;
27 		else if (class > it->uabi_class ||
28 			 instance > it->uabi_instance)
29 			p = p->rb_right;
30 		else if (instance < it->uabi_instance)
31 			p = p->rb_left;
32 		else
33 			return it;
34 	}
35 
36 	return NULL;
37 }
38 
/*
 * Publish @engine on the lockless registration list. The engine only
 * becomes visible to intel_engine_lookup_user() after
 * intel_engines_driver_register() drains this list into the uabi rbtree.
 */
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
43 
/* Sentinel for engine classes deliberately hidden from userspace. */
#define I915_NO_UABI_CLASS ((u16)(-1))

/* Translation table from internal engine class to the uabi class. */
static const u16 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
	[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
54 
55 static int engine_cmp(void *priv, const struct list_head *A,
56 		      const struct list_head *B)
57 {
58 	const struct intel_engine_cs *a =
59 		container_of(A, typeof(*a), uabi_list);
60 	const struct intel_engine_cs *b =
61 		container_of(B, typeof(*b), uabi_list);
62 
63 	if (uabi_classes[a->class] < uabi_classes[b->class])
64 		return -1;
65 	if (uabi_classes[a->class] > uabi_classes[b->class])
66 		return 1;
67 
68 	if (a->instance < b->instance)
69 		return -1;
70 	if (a->instance > b->instance)
71 		return 1;
72 
73 	return 0;
74 }
75 
/* Atomically claim every engine queued by intel_engine_add_user(). */
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all(&i915->uabi_engines_llist);
}
80 
81 static void sort_engines(struct drm_i915_private *i915,
82 			 struct list_head *engines)
83 {
84 	struct llist_node *pos, *next;
85 
86 	llist_for_each_safe(pos, next, get_engines(i915)) {
87 		struct intel_engine_cs *engine =
88 			container_of(pos, typeof(*engine), uabi_llist);
89 		list_add(&engine->uabi_list, engines);
90 	}
91 	list_sort(NULL, engines, engine_cmp);
92 }
93 
/*
 * Derive the device-wide scheduler capability mask (i915->caps.scheduler).
 * A capability bit is advertised only if every uabi engine supports it;
 * any single engine lacking a capability removes it for the whole device.
 */
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine; /* bit index within engine->flags */
		u8 sched;  /* corresponding bit index within caps.scheduler */
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&engine->gt->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		/* Accumulate per-engine support/lack for each mapped flag. */
		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	/* Advertise only the capabilities no engine disclaimed. */
	i915->caps.scheduler = enabled & ~disabled;
	/* Without a scheduler, the remaining capability bits are meaningless. */
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
136 
137 const char *intel_engine_class_repr(u8 class)
138 {
139 	static const char * const uabi_names[] = {
140 		[RENDER_CLASS] = "rcs",
141 		[COPY_ENGINE_CLASS] = "bcs",
142 		[VIDEO_DECODE_CLASS] = "vcs",
143 		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
144 		[OTHER_CLASS] = "other",
145 		[COMPUTE_CLASS] = "ccs",
146 	};
147 
148 	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
149 		return "xxx";
150 
151 	return uabi_names[class];
152 }
153 
/*
 * Cursor used while walking the sorted engine list in
 * intel_engines_driver_register(): tracks the current run of engines
 * sharing the same gt and class so legacy ring indices restart per run.
 */
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};
159 
/*
 * Map the cursor's (class, instance) onto the legacy flat engine index
 * (RCS0, BCS0, VCS0.., etc), or INVALID_ENGINE when the class has no
 * legacy slot or the instance exceeds the per-class maximum.
 */
static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max; /* first flat index and instance count per class */
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	/* Unmapped classes have max == 0, so also fail this check. */
	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}
180 
181 static void add_legacy_ring(struct legacy_ring *ring,
182 			    struct intel_engine_cs *engine)
183 {
184 	if (engine->gt != ring->gt || engine->class != ring->class) {
185 		ring->gt = engine->gt;
186 		ring->class = engine->class;
187 		ring->instance = 0;
188 	}
189 
190 	engine->legacy_idx = legacy_ring_idx(ring);
191 	if (engine->legacy_idx != INVALID_ENGINE)
192 		ring->instance++;
193 }
194 
195 static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
196 {
197 	char old[sizeof(engine->name)];
198 
199 	memcpy(old, engine->name, sizeof(engine->name));
200 	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
201 	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
202 }
203 
/*
 * Finalise the userspace-visible engine list: drain and sort the
 * registered engines, assign each a uabi class/instance and final name,
 * build the lookup rbtree, assign legacy execbuf indices, and derive the
 * scheduler capabilities. With debug selftests enabled, the resulting
 * mapping is cross-checked and discarded entirely if inconsistent.
 */
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	u16 name_instance, other_instance = 0;
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of(it, typeof(*engine), uabi_list);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];
		/*
		 * Hidden (no-uabi) engines get their own instance counter;
		 * exposed classes count per uabi class.
		 */
		if (engine->uabi_class == I915_NO_UABI_CLASS) {
			name_instance = other_instance++;
		} else {
			GEM_BUG_ON(engine->uabi_class >=
				   ARRAY_SIZE(i915->engine_uabi_class_count));
			name_instance =
				i915->engine_uabi_class_count[engine->uabi_class]++;
		}
		engine->uabi_instance = name_instance;

		/*
		 * Replace the internal name with the final user and log facing
		 * name.
		 */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      name_instance);

		/* Hidden engines are named but never enter the rbtree. */
		if (engine->uabi_class == I915_NO_UABI_CLASS)
			continue;

		/*
		 * The list is sorted, so each engine is inserted at the
		 * rightmost position tracked by prev/p — an O(1) append.
		 */
		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		/* Every counted (class, instance) must resolve to itself. */
		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		/* On any mismatch, expose no engines rather than bad ones. */
		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}
314 
315 unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
316 {
317 	struct intel_engine_cs *engine;
318 	unsigned int which;
319 
320 	which = 0;
321 	for_each_uabi_engine(engine, i915)
322 		if (engine->default_state)
323 			which |= BIT(engine->uabi_class);
324 
325 	return which;
326 }
327