xref: /linux/drivers/gpu/drm/i915/gt/intel_engine_user.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"

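/*
 * Resolve a uABI (class, instance) pair to its engine, or NULL if that pair
 * is not exposed to userspace. The uabi_engines rbtree is keyed by uabi_class
 * first and uabi_instance second, which is the order tested below. As an
 * illustrative (hypothetical) use, the second video engine would be found
 * with intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 1).
 */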
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	struct rb_node *p = i915->uabi_engines.rb_node;

	while (p) {
		struct intel_engine_cs *it =
			rb_entry(p, typeof(*it), uabi_node);

		if (class < it->uabi_class)
			p = p->rb_left;
		else if (class > it->uabi_class ||
			 instance > it->uabi_instance)
			p = p->rb_right;
		else if (instance < it->uabi_instance)
			p = p->rb_left;
		else
			return it;
	}

	return NULL;
}

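/*
 * Engines announce themselves on a lockless llist as they are set up; they
 * are only sorted and published into the uabi_engines rbtree later, from
 * intel_engines_driver_register().
 */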
void intel_engine_add_user(struct intel_engine_cs *engine)
{
	llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}

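/*
 * Translation from the driver-internal engine classes to the classes defined
 * in the uAPI. OTHER_CLASS engines (for example the GSC command streamer)
 * have no user visible counterpart and are tagged I915_NO_UABI_CLASS.
 */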
#define I915_NO_UABI_CLASS ((u16)(-1))

static const u16 uabi_classes[] = {
	[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
	[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
	[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
	[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};

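/* list_sort() comparator: order engines by uABI class, then by instance. */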
static int engine_cmp(void *priv, const struct list_head *A,
		      const struct list_head *B)
{
	const struct intel_engine_cs *a =
		container_of(A, typeof(*a), uabi_list);
	const struct intel_engine_cs *b =
		container_of(B, typeof(*b), uabi_list);

	if (uabi_classes[a->class] < uabi_classes[b->class])
		return -1;
	if (uabi_classes[a->class] > uabi_classes[b->class])
		return 1;

	if (a->instance < b->instance)
		return -1;
	if (a->instance > b->instance)
		return 1;

	return 0;
}

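/* Atomically take ownership of every engine added to the llist so far. */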
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
	return llist_del_all(&i915->uabi_engines_llist);
}

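/*
 * Drain the pending llist onto a regular list and sort it into
 * (uABI class, instance) order ready for registration.
 */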
static void sort_engines(struct drm_i915_private *i915,
			 struct list_head *engines)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, get_engines(i915)) {
		struct intel_engine_cs *engine =
			container_of(pos, typeof(*engine), uabi_llist);
		list_add(&engine->uabi_list, engines);
	}
	list_sort(NULL, engines, engine_cmp);
}

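/*
 * Derive the scheduler capabilities common to all user-visible engines: each
 * engine adds the caps it supports to 'enabled' and those it lacks to
 * 'disabled', so only bits set on every engine survive. The result is what
 * userspace sees when querying the scheduler caps (the
 * I915_PARAM_HAS_SCHEDULER getparam).
 */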
static void set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(HAS_PREEMPTION, PREEMPTION),
		MAP(HAS_SEMAPHORES, SEMAPHORES),
		MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
	};
	struct intel_engine_cs *engine;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
		int i;

		if (engine->sched_engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		if (intel_uc_uses_guc_submission(&engine->gt->uc))
			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}

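/*
 * Return the short class abbreviation ("rcs", "bcs", "vcs", ...) used to
 * build user-visible engine names; unknown classes map to "xxx".
 */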
const char *intel_engine_class_repr(u8 class)
{
	static const char * const uabi_names[] = {
		[RENDER_CLASS] = "rcs",
		[COPY_ENGINE_CLASS] = "bcs",
		[VIDEO_DECODE_CLASS] = "vcs",
		[VIDEO_ENHANCEMENT_CLASS] = "vecs",
		[OTHER_CLASS] = "other",
		[COMPUTE_CLASS] = "ccs",
	};

	if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
		return "xxx";

	return uabi_names[class];
}

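/*
 * Legacy ring bookkeeping: old execbuf userspace selects engines by fixed
 * ring index rather than by (class, instance), so each engine is also given
 * a legacy_idx matching that historic layout (see execbuf's user_map[]).
 */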
struct legacy_ring {
	struct intel_gt *gt;
	u8 class;
	u8 instance;
};

static int legacy_ring_idx(const struct legacy_ring *ring)
{
	static const struct {
		u8 base, max;
	} map[] = {
		[RENDER_CLASS] = { RCS0, 1 },
		[COPY_ENGINE_CLASS] = { BCS0, 1 },
		[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
		[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
		[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
	};

	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
		return INVALID_ENGINE;

	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
		return INVALID_ENGINE;

	return map[ring->class].base + ring->instance;
}

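/*
 * Restart the running instance count whenever we cross into a new GT or
 * engine class, then hand out the next legacy index for this engine,
 * if it has one.
 */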
static void add_legacy_ring(struct legacy_ring *ring,
			    struct intel_engine_cs *engine)
{
	if (engine->gt != ring->gt || engine->class != ring->class) {
		ring->gt = engine->gt;
		ring->class = engine->class;
		ring->instance = 0;
	}

	engine->legacy_idx = legacy_ring_idx(ring);
	if (engine->legacy_idx != INVALID_ENGINE)
		ring->instance++;
}

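/*
 * Overwrite the provisional engine name with its final, user and log facing
 * form: the class abbreviation plus the uABI instance, giving names such as
 * "rcs0" or "vcs1" (examples only; the set depends on the platform).
 */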
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
	char old[sizeof(engine->name)];

	memcpy(old, engine->name, sizeof(engine->name));
	scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
	drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
}

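/*
 * Publish the engines to userspace: drain and sort the pending list, assign
 * uABI class/instance numbers and final names, insert the engines into the
 * uabi_engines rbtree (sorted order lets each node be linked as the rightmost
 * descendant of its predecessor), fix up the legacy execbuf indices and
 * finally derive the common scheduler caps.
 */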
void intel_engines_driver_register(struct drm_i915_private *i915)
{
	u16 name_instance, other_instance = 0;
	struct legacy_ring ring = {};
	struct list_head *it, *next;
	struct rb_node **p, *prev;
	LIST_HEAD(engines);

	sort_engines(i915, &engines);

	prev = NULL;
	p = &i915->uabi_engines.rb_node;
	list_for_each_safe(it, next, &engines) {
		struct intel_engine_cs *engine =
			container_of(it, typeof(*engine), uabi_list);

		if (intel_gt_has_unrecoverable_error(engine->gt))
			continue; /* ignore incomplete engines */

		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
		engine->uabi_class = uabi_classes[engine->class];
		if (engine->uabi_class == I915_NO_UABI_CLASS) {
			name_instance = other_instance++;
		} else {
			GEM_BUG_ON(engine->uabi_class >=
				   ARRAY_SIZE(i915->engine_uabi_class_count));
			name_instance =
				i915->engine_uabi_class_count[engine->uabi_class]++;
		}
		engine->uabi_instance = name_instance;

		/*
		 * Replace the internal name with the final user and log facing
		 * name.
		 */
		engine_rename(engine,
			      intel_engine_class_repr(engine->class),
			      name_instance);

		if (engine->uabi_class == I915_NO_UABI_CLASS)
			continue;

		rb_link_node(&engine->uabi_node, prev, p);
		rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

		GEM_BUG_ON(intel_engine_lookup_user(i915,
						    engine->uabi_class,
						    engine->uabi_instance) != engine);

		/* Fix up the mapping to match default execbuf::user_map[] */
		add_legacy_ring(&ring, engine);

		prev = &engine->uabi_node;
		p = &prev->rb_right;
	}

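	/*
	 * With selftests and extra GEM debug enabled, cross-check the result:
	 * every counted (class, instance) pair must be reachable through
	 * intel_engine_lookup_user() and default context state must be
	 * consistent within each class; on any mismatch the whole uABI
	 * engine tree is dropped.
	 */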
	if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
	    IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		struct intel_engine_cs *engine;
		unsigned int isolation;
		int class, inst;
		int errors = 0;

		for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
			for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
				engine = intel_engine_lookup_user(i915,
								  class, inst);
				if (!engine) {
					pr_err("UABI engine not found for { class:%d, instance:%d }\n",
					       class, inst);
					errors++;
					continue;
				}

				if (engine->uabi_class != class ||
				    engine->uabi_instance != inst) {
					pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
					       engine->name,
					       engine->uabi_class,
					       engine->uabi_instance,
					       class, inst);
					errors++;
					continue;
				}
			}
		}

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		isolation = intel_engines_has_context_isolation(i915);
		for_each_uabi_engine(engine, i915) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((isolation & bit) != expected) {
				pr_err("mismatching default context state for class %d on engine %s\n",
				       engine->uabi_class, engine->name);
				errors++;
			}
		}

		if (drm_WARN(&i915->drm, errors,
			     "Invalid UABI engine mapping found"))
			i915->uabi_engines = RB_ROOT;
	}

	set_scheduler_caps(i915);
}

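/*
 * Return a mask of uABI engine classes whose engines carry a saved default
 * context state (engine->default_state).
 */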
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int which;

	which = 0;
	for_each_uabi_engine(engine, i915)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}